Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accessibility/braille/braille_console.c | 2
-rw-r--r-- drivers/accessibility/speakup/main.c | 2
-rw-r--r-- drivers/accessibility/speakup/serialio.h | 3
-rw-r--r-- drivers/acpi/arm64/iort.c | 360
-rw-r--r-- drivers/acpi/processor_thermal.c | 2
-rw-r--r-- drivers/acpi/property.c | 463
-rw-r--r-- drivers/acpi/scan.c | 1
-rw-r--r-- drivers/acpi/utils.c | 38
-rw-r--r-- drivers/acpi/viot.c | 6
-rw-r--r-- drivers/amba/bus.c | 8
-rw-r--r-- drivers/android/binder.c | 12
-rw-r--r-- drivers/android/binder_alloc.c | 70
-rw-r--r-- drivers/android/binder_alloc.h | 2
-rw-r--r-- drivers/android/binder_alloc_selftest.c | 2
-rw-r--r-- drivers/ata/libata-eh.c | 1
-rw-r--r-- drivers/ata/libata-scsi.c | 1
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 2
-rw-r--r-- drivers/atm/idt77252.c | 1
-rw-r--r-- drivers/base/arch_topology.c | 4
-rw-r--r-- drivers/base/core.c | 2
-rw-r--r-- drivers/base/dd.c | 40
-rw-r--r-- drivers/base/driver.c | 6
-rw-r--r-- drivers/base/firmware_loader/sysfs.c | 7
-rw-r--r-- drivers/base/firmware_loader/sysfs.h | 5
-rw-r--r-- drivers/base/firmware_loader/sysfs_upload.c | 12
-rw-r--r-- drivers/base/power/domain.c | 2
-rw-r--r-- drivers/base/regmap/regmap-spi.c | 8
-rw-r--r-- drivers/block/rbd.c | 6
-rw-r--r-- drivers/block/virtio_blk.c | 24
-rw-r--r-- drivers/block/xen-blkback/common.h | 3
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 24
-rw-r--r-- drivers/block/xen-blkfront.c | 22
-rw-r--r-- drivers/block/zram/zcomp.c | 11
-rw-r--r-- drivers/block/zram/zram_drv.c | 48
-rw-r--r-- drivers/block/zram/zram_drv.h | 1
-rw-r--r-- drivers/bus/mhi/host/main.c | 19
-rw-r--r-- drivers/char/agp/intel-gtt.c | 17
-rw-r--r-- drivers/char/hw_random/powernv-rng.c | 2
-rw-r--r-- drivers/char/hw_random/s390-trng.c | 2
-rw-r--r-- drivers/char/mem.c | 6
-rw-r--r-- drivers/char/tpm/Kconfig | 12
-rw-r--r-- drivers/char/tpm/Makefile | 1
-rw-r--r-- drivers/char/tpm/tpm.h | 1
-rw-r--r-- drivers/char/tpm/tpm1-cmd.c | 7
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 6
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 14
-rw-r--r-- drivers/char/tpm/tpm_tis_core.h | 10
-rw-r--r-- drivers/char/tpm/tpm_tis_i2c.c | 390
-rw-r--r-- drivers/clk/bcm/clk-raspberrypi.c | 16
-rw-r--r-- drivers/clk/clk.c | 3
-rw-r--r-- drivers/clk/ti/clk.c | 1
-rw-r--r-- drivers/clocksource/timer-riscv.c | 40
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 19
-rw-r--r-- drivers/cpufreq/cpufreq.c | 8
-rw-r--r-- drivers/cpufreq/imx-cpufreq-dt.c | 12
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq.c | 1
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 14
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-nvmem.c | 109
-rw-r--r-- drivers/cpufreq/sti-cpufreq.c | 27
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 31
-rw-r--r-- drivers/cpufreq/tegra194-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/tegra20-cpufreq.c | 12
-rw-r--r-- drivers/cpufreq/ti-cpufreq.c | 42
-rw-r--r-- drivers/cpuidle/cpuidle.c | 6
-rw-r--r-- drivers/cxl/Kconfig | 9
-rw-r--r-- drivers/cxl/acpi.c | 243
-rw-r--r-- drivers/cxl/core/Makefile | 1
-rw-r--r-- drivers/cxl/core/core.h | 51
-rw-r--r-- drivers/cxl/core/hdm.c | 691
-rw-r--r-- drivers/cxl/core/mbox.c | 95
-rw-r--r-- drivers/cxl/core/memdev.c | 4
-rw-r--r-- drivers/cxl/core/pci.c | 181
-rw-r--r-- drivers/cxl/core/pmem.c | 4
-rw-r--r-- drivers/cxl/core/port.c | 738
-rw-r--r-- drivers/cxl/core/region.c | 1896
-rw-r--r-- drivers/cxl/cxl.h | 312
-rw-r--r-- drivers/cxl/cxlmem.h | 42
-rw-r--r-- drivers/cxl/cxlpci.h | 1
-rw-r--r-- drivers/cxl/mem.c | 49
-rw-r--r-- drivers/cxl/pci.c | 46
-rw-r--r-- drivers/cxl/pmem.c | 259
-rw-r--r-- drivers/cxl/port.c | 53
-rw-r--r-- drivers/dax/super.c | 67
-rw-r--r-- drivers/devfreq/exynos-bus.c | 21
-rw-r--r-- drivers/devfreq/tegra30-devfreq.c | 22
-rw-r--r-- drivers/dma-buf/dma-resv.c | 3
-rw-r--r-- drivers/dma/ti/k3-udma-private.c | 6
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 21
-rw-r--r-- drivers/dma/xilinx/zynqmp_dma.c | 2
-rw-r--r-- drivers/edac/mpc85xx_edac.c | 2
-rw-r--r-- drivers/edac/ppc4xx_edac.c | 1
-rw-r--r-- drivers/firmware/arm_scmi/clock.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/optee.c | 1
-rw-r--r-- drivers/firmware/arm_scmi/reset.c | 10
-rw-r--r-- drivers/firmware/arm_scmi/scmi_pm_domain.c | 20
-rw-r--r-- drivers/firmware/arm_scmi/sensors.c | 25
-rw-r--r-- drivers/firmware/cirrus/cs_dsp.c | 107
-rw-r--r-- drivers/firmware/dmi_scan.c | 2
-rw-r--r-- drivers/firmware/efi/capsule-loader.c | 31
-rw-r--r-- drivers/firmware/efi/efibc.c | 3
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 7
-rw-r--r-- drivers/firmware/efi/libstub/riscv-stub.c | 13
-rw-r--r-- drivers/firmware/efi/libstub/secureboot.c | 8
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 8
-rw-r--r-- drivers/firmware/mtk-adsp-ipc.c | 36
-rw-r--r-- drivers/gpio/gpio-104-dio-48e.c | 10
-rw-r--r-- drivers/gpio/gpio-104-idi-48.c | 10
-rw-r--r-- drivers/gpio/gpio-104-idio-16.c | 18
-rw-r--r-- drivers/gpio/gpio-ftgpio010.c | 22
-rw-r--r-- drivers/gpio/gpio-ixp4xx.c | 17
-rw-r--r-- drivers/gpio/gpio-mockup.c | 15
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 1
-rw-r--r-- drivers/gpio/gpio-mt7621.c | 21
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 8
-rw-r--r-- drivers/gpio/gpio-pxa.c | 11
-rw-r--r-- drivers/gpio/gpio-realtek-otto.c | 166
-rw-r--r-- drivers/gpio/gpio-rockchip.c | 4
-rw-r--r-- drivers/gpio/gpio-tqmx86.c | 4
-rw-r--r-- drivers/gpio/gpio-ws16c48.c | 10
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aldebaran.c | 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 59
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c | 150
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 93
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/conversion.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/conversion.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 216
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 44
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 104
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 220
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 354
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 132
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 61
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 422
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 379
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 433
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 102
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 268
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 1131
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 55
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 116
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/include/logger_types.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 15
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h | 13
-rw-r--r-- drivers/gpu/drm/amd/include/mes_v11_api_def.h | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 31
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 64
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 43
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 17
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 50
-rw-r--r-- drivers/gpu/drm/bridge/lvds-codec.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 5
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 2
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 24
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 21
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 4
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 20
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r-- drivers/gpu/drm/gma500/cdv_device.c | 4
-rw-r--r-- drivers/gpu/drm/gma500/gem.c | 4
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 11
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_device.c | 5
-rw-r--r-- drivers/gpu/drm/gma500/power.c | 8
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 5
-rw-r--r-- drivers/gpu/drm/gma500/psb_irq.c | 15
-rw-r--r-- drivers/gpu/drm/gma500/psb_irq.h | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 10
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight.c | 37
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 25
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 77
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 18
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_llc.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_migrate.c | 67
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_region_lmem.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 50
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 120
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 36
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_resource.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_resource.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 8
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-kms.c | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_devfreq.c | 12
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 24
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 5
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_viu.c | 24
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 6
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_devfreq.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 22
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c | 14
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 15
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 3
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 5
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 8
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 10
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 13
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 18
-rw-r--r-- drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 18
-rw-r--r-- drivers/hid/hid-asus.c | 7
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-input.c | 7
-rw-r--r-- drivers/hid/hid-nintendo.c | 6
-rw-r--r-- drivers/hid/hid-quirks.c | 2
-rw-r--r-- drivers/hid/hid-steam.c | 10
-rw-r--r-- drivers/hid/hid-thrustmaster.c | 3
-rw-r--r-- drivers/hid/hidraw.c | 2
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r-- drivers/hid/intel-ish-hid/ishtp-hid.h | 2
-rw-r--r-- drivers/hid/intel-ish-hid/ishtp/client.c | 68
-rw-r--r-- drivers/hv/connection.c | 11
-rw-r--r-- drivers/hv/hv_balloon.c | 135
-rw-r--r-- drivers/hv/hv_fcopy.c | 2
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 7
-rw-r--r-- drivers/hv/vmbus_drv.c | 83
-rw-r--r-- drivers/hwmon/asus-ec-sensors.c | 408
-rw-r--r-- drivers/hwmon/gpio-fan.c | 3
-rw-r--r-- drivers/hwmon/lm90.c | 2
-rw-r--r-- drivers/hwmon/mr75203.c | 72
-rw-r--r-- drivers/hwmon/nct6775-core.c | 3
-rw-r--r-- drivers/hwmon/nct6775-platform.c | 2
-rw-r--r-- drivers/hwmon/nct6775.h | 2
-rw-r--r-- drivers/hwmon/pmbus/pmbus_core.c | 19
-rw-r--r-- drivers/hwmon/tps23861.c | 10
-rw-r--r-- drivers/hwspinlock/omap_hwspinlock.c | 6
-rw-r--r-- drivers/hwspinlock/qcom_hwspinlock.c | 28
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x-core.c | 8
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x.h | 8
-rw-r--r-- drivers/i2c/busses/i2c-altera.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-aspeed.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-au1550.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-axxia.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-bcm-kona.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-brcmstb.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cbus-gpio.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cht-wc.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cros-ec-tunnel.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-davinci.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-digicolor.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-eg20t.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-emev2.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-exynos5.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-gpio.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-highlander.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-hix5hd2.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-ibm_iic.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-icy.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-imx-lpi2c.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 20
-rw-r--r-- drivers/i2c/busses/i2c-kempld.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-lpc2k.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-meson.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-microchip-corei2c.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-mpc.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-mt65xx.c | 45
-rw-r--r-- drivers/i2c/busses/i2c-mt7621.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-mv64xxx.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-mxs.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-nvidia-gpu.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-opal.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-parport.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-pxa.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-qcom-geni.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-qup.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-riic.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-s3c2410.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-scmi.c | 9
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-simtec.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-taos-evm.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-tegra-bpmp.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-tegra.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-uniphier-f.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-uniphier.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-versatile.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-wmt.c | 2
-rw-r--r-- drivers/i2c/i2c-core-base.c | 2
-rw-r--r-- drivers/i2c/i2c-smbus.c | 2
-rw-r--r-- drivers/iio/adc/ad7292.c | 4
-rw-r--r-- drivers/iio/adc/mcp3911.c | 28
-rw-r--r-- drivers/iio/light/cm32181.c | 2
-rw-r--r-- drivers/iio/light/cm3605.c | 6
-rw-r--r-- drivers/infiniband/core/cma.c | 4
-rw-r--r-- drivers/infiniband/core/rw.c | 45
-rw-r--r-- drivers/infiniband/core/umem_dmabuf.c | 8
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 25
-rw-r--r-- drivers/infiniband/hw/erdma/erdma_qp.c | 2
-rw-r--r-- drivers/infiniband/hw/erdma/erdma_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/trace_dbg.h | 8
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_device.h | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 2
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_qp.c | 7
-rw-r--r-- drivers/infiniband/hw/irdma/uk.c | 7
-rw-r--r-- drivers/infiniband/hw/irdma/utils.c | 15
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.c | 13
-rw-r--r-- drivers/infiniband/hw/mlx5/mad.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 36
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/umr.c | 3
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp_tx.c | 18
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 7
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt.c | 9
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-srv.c | 14
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 3
-rw-r--r-- drivers/input/input-core-private.h | 16
-rw-r--r-- drivers/input/input-mt.c | 48
-rw-r--r-- drivers/input/input.c | 149
-rw-r--r-- drivers/input/joystick/adc-joystick.c | 15
-rw-r--r-- drivers/input/joystick/iforce/iforce-main.c | 1
-rw-r--r-- drivers/input/joystick/iforce/iforce-serio.c | 6
-rw-r--r-- drivers/input/joystick/iforce/iforce-usb.c | 8
-rw-r--r-- drivers/input/joystick/iforce/iforce.h | 6
-rw-r--r-- drivers/input/joystick/sensehat-joystick.c | 4
-rw-r--r-- drivers/input/keyboard/Kconfig | 2
-rw-r--r-- drivers/input/keyboard/adp5588-keys.c | 206
-rw-r--r-- drivers/input/keyboard/cros_ec_keyb.c | 89
-rw-r--r-- drivers/input/keyboard/mt6779-keypad.c | 18
-rw-r--r-- drivers/input/keyboard/mtk-pmic-keys.c | 98
-rw-r--r-- drivers/input/keyboard/omap4-keypad.c | 26
-rw-r--r-- drivers/input/misc/iqs7222.c | 178
-rw-r--r-- drivers/input/misc/rk805-pwrkey.c | 1
-rw-r--r-- drivers/input/mouse/cyapa_gen6.c | 2
-rw-r--r-- drivers/input/mouse/gpio_mouse.c | 2
-rw-r--r-- drivers/input/serio/gscps2.c | 4
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 1270
-rw-r--r-- drivers/input/touchscreen/edt-ft5x06.c | 96
-rw-r--r-- drivers/input/touchscreen/exc3000.c | 7
-rw-r--r-- drivers/input/touchscreen/goodix.c | 24
-rw-r--r-- drivers/input/touchscreen/zinitix.c | 112
-rw-r--r-- drivers/iommu/Kconfig | 10
-rw-r--r-- drivers/iommu/amd/amd_iommu.h | 18
-rw-r--r-- drivers/iommu/amd/amd_iommu_types.h | 186
-rw-r--r-- drivers/iommu/amd/init.c | 942
-rw-r--r-- drivers/iommu/amd/io_pgtable.c | 6
-rw-r--r-- drivers/iommu/amd/iommu.c | 588
-rw-r--r-- drivers/iommu/amd/iommu_v2.c | 69
-rw-r--r-- drivers/iommu/amd/quirks.c | 4
-rw-r--r-- drivers/iommu/apple-dart.c | 4
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 144
-rw-r--r-- drivers/iommu/arm/arm-smmu/Makefile | 1
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 142
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 34
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h | 28
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu.c | 73
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu.h | 1
-rw-r--r-- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 18
-rw-r--r-- drivers/iommu/dma-iommu.c | 124
-rw-r--r-- drivers/iommu/exynos-iommu.c | 182
-rw-r--r-- drivers/iommu/fsl_pamu_domain.c | 5
-rw-r--r-- drivers/iommu/hyperv-iommu.c | 4
-rw-r--r-- drivers/iommu/intel/cap_audit.c | 2
-rw-r--r-- drivers/iommu/intel/debugfs.c | 51
-rw-r--r-- drivers/iommu/intel/dmar.c | 41
-rw-r--r-- drivers/iommu/intel/iommu.c | 609
-rw-r--r-- drivers/iommu/intel/iommu.h | 842
-rw-r--r-- drivers/iommu/intel/irq_remapping.c | 2
-rw-r--r-- drivers/iommu/intel/pasid.c | 107
-rw-r--r-- drivers/iommu/intel/pasid.h | 1
-rw-r--r-- drivers/iommu/intel/perf.c | 2
-rw-r--r-- drivers/iommu/intel/svm.c | 11
-rw-r--r-- drivers/iommu/intel/trace.c | 2
-rw-r--r-- drivers/iommu/intel/trace.h | 99
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 75
-rw-r--r-- drivers/iommu/iommu.c | 80
-rw-r--r-- drivers/iommu/iova.c | 12
-rw-r--r-- drivers/iommu/msm_iommu.c | 7
-rw-r--r-- drivers/iommu/mtk_iommu.c | 71
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 5
-rw-r--r-- drivers/iommu/of_iommu.c | 2
-rw-r--r-- drivers/iommu/sprd-iommu.c | 11
-rw-r--r-- drivers/iommu/sun50i-iommu.c | 3
-rw-r--r-- drivers/iommu/tegra-gart.c | 5
-rw-r--r-- drivers/iommu/tegra-smmu.c | 3
-rw-r--r-- drivers/iommu/virtio-iommu.c | 42
-rw-r--r-- drivers/irqchip/irq-loongarch-cpu.c | 2
-rw-r--r-- drivers/irqchip/irq-loongson-eiointc.c | 13
-rw-r--r-- drivers/irqchip/irq-loongson-liointc.c | 4
-rw-r--r-- drivers/irqchip/irq-loongson-pch-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-loongson-pch-pic.c | 40
-rw-r--r-- drivers/irqchip/irq-riscv-intc.c | 7
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 7
-rw-r--r-- drivers/irqchip/irq-tegra.c | 10
-rw-r--r-- drivers/leds/Kconfig | 10
-rw-r--r-- drivers/leds/blink/Kconfig | 14
-rw-r--r-- drivers/leds/blink/Makefile | 1
-rw-r--r-- drivers/leds/blink/leds-bcm63138.c | 307
-rw-r--r-- drivers/leds/leds-is31fl319x.c | 529
-rw-r--r-- drivers/leds/leds-turris-omnia.c | 4
-rw-r--r-- drivers/leds/rgb/leds-pwm-multicolor.c | 8
-rw-r--r-- drivers/macintosh/adb.c | 2
-rw-r--r-- drivers/mailbox/imx-mailbox.c | 40
-rw-r--r-- drivers/mailbox/mtk-cmdq-mailbox.c | 11
-rw-r--r-- drivers/md/bcache/btree.c | 2
-rw-r--r-- drivers/md/dm-bufio.c | 45
-rw-r--r-- drivers/md/dm-ebs-target.c | 3
-rw-r--r-- drivers/md/dm-integrity.c | 2
-rw-r--r-- drivers/md/dm-snap-persistent.c | 2
-rw-r--r-- drivers/md/dm-verity-fec.c | 4
-rw-r--r-- drivers/md/dm-verity-target.c | 172
-rw-r--r-- drivers/md/dm-verity.h | 6
-rw-r--r-- drivers/md/dm-writecache.c | 3
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 4
-rw-r--r-- drivers/md/dm.c | 2
-rw-r--r-- drivers/md/persistent-data/dm-block-manager.c | 3
-rw-r--r-- drivers/md/raid5.c | 2
-rw-r--r-- drivers/media/i2c/tda1997x.c | 1
-rw-r--r-- drivers/media/platform/qcom/venus/pm_helpers.c | 10
-rw-r--r-- drivers/media/rc/mceusb.c | 35
-rw-r--r-- drivers/media/usb/b2c2/flexcop-usb.c | 2
-rw-r--r-- drivers/memory/tegra/tegra124-emc.c | 11
-rw-r--r-- drivers/mfd/Kconfig | 5
-rw-r--r-- drivers/mfd/asic3.c | 9
-rw-r--r-- drivers/mfd/axp20x.c | 9
-rw-r--r-- drivers/mfd/cros_ec_dev.c | 5
-rw-r--r-- drivers/mfd/db8500-prcmu.c | 2
-rw-r--r-- drivers/mfd/dln2.c | 17
-rw-r--r-- drivers/mfd/intel-lpss-pci.c | 13
-rw-r--r-- drivers/mfd/intel_soc_pmic_bxtwc.c | 194
-rw-r--r-- drivers/mfd/intel_soc_pmic_chtwc.c | 27
-rw-r--r-- drivers/mfd/max77620.c | 2
-rw-r--r-- drivers/mfd/max77714.c | 4
-rw-r--r-- drivers/mfd/mt6358-irq.c | 24
-rw-r--r-- drivers/mfd/mt6397-core.c | 91
-rw-r--r-- drivers/mfd/mt6397-irq.c | 9
-rw-r--r-- drivers/mfd/qcom-pm8008.c | 53
-rw-r--r-- drivers/mfd/syscon.c | 3
-rw-r--r-- drivers/mfd/t7l66xb.c | 6
-rw-r--r-- drivers/mfd/tc6393xb.c | 5
-rw-r--r-- drivers/mfd/twl-core.c | 323
-rw-r--r-- drivers/misc/fastrpc.c | 14
-rw-r--r-- drivers/misc/vmw_balloon.c | 2
-rw-r--r-- drivers/mmc/core/sd.c | 46
-rw-r--r-- drivers/mmc/host/Kconfig | 1
-rw-r--r-- drivers/mmc/host/meson-gx-mmc.c | 6
-rw-r--r-- drivers/mmc/host/mtk-sd.c | 6
-rw-r--r-- drivers/mmc/host/pxamci.c | 4
-rw-r--r-- drivers/mmc/host/sdhci-of-dwcmshc.c | 16
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 8
-rw-r--r-- drivers/mtd/devices/powernv_flash.c | 4
-rw-r--r-- drivers/mtd/devices/spear_smi.c | 10
-rw-r--r-- drivers/mtd/devices/st_spi_fsm.c | 23
-rw-r--r-- drivers/mtd/hyperbus/hbmc-am654.c | 6
-rw-r--r-- drivers/mtd/hyperbus/hyperbus-core.c | 8
-rw-r--r-- drivers/mtd/hyperbus/rpc-if.c | 13
-rw-r--r-- drivers/mtd/lpddr/lpddr2_nvm.c | 4
-rw-r--r-- drivers/mtd/maps/physmap-core.c | 13
-rw-r--r-- drivers/mtd/maps/physmap-versatile.c | 2
-rw-r--r-- drivers/mtd/mtdchar.c | 13
-rw-r--r-- drivers/mtd/mtdcore.c | 63
-rw-r--r-- drivers/mtd/nand/raw/arasan-nand-controller.c | 16
-rw-r--r-- drivers/mtd/nand/raw/atmel/nand-controller.c | 4
-rw-r--r-- drivers/mtd/nand/raw/cafe_nand.c | 9
-rw-r--r-- drivers/mtd/nand/raw/meson_nand.c | 17
-rw-r--r-- drivers/mtd/nand/raw/omap2.c | 6
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 306
-rw-r--r-- drivers/mtd/nand/raw/sm_common.c | 2
-rw-r--r-- drivers/mtd/nand/raw/tegra_nand.c | 5
-rw-r--r-- drivers/mtd/nand/spi/Makefile | 2
-rw-r--r-- drivers/mtd/nand/spi/ato.c | 86
-rw-r--r-- drivers/mtd/nand/spi/core.c | 1
-rw-r--r-- drivers/mtd/parsers/Kconfig | 9
-rw-r--r-- drivers/mtd/parsers/Makefile | 1
-rw-r--r-- drivers/mtd/parsers/ofpart_bcm4908.c | 3
-rw-r--r-- drivers/mtd/parsers/redboot.c | 1
-rw-r--r-- drivers/mtd/parsers/scpart.c | 249
-rw-r--r-- drivers/mtd/sm_ftl.c | 2
-rw-r--r-- drivers/mtd/spi-nor/controllers/hisi-sfc.c | 2
-rw-r--r-- drivers/mtd/spi-nor/controllers/nxp-spifi.c | 8
-rw-r--r-- drivers/mtd/spi-nor/core.c | 70
-rw-r--r-- drivers/mtd/spi-nor/core.h | 21
-rw-r--r-- drivers/mtd/spi-nor/debugfs.c | 2
-rw-r--r-- drivers/mtd/spi-nor/esmt.c | 2
-rw-r--r-- drivers/mtd/spi-nor/issi.c | 31
-rw-r--r-- drivers/mtd/spi-nor/micron-st.c | 12
-rw-r--r-- drivers/mtd/spi-nor/otp.c | 12
-rw-r--r-- drivers/mtd/spi-nor/sfdp.c | 34
-rw-r--r-- drivers/mtd/spi-nor/spansion.c | 185
-rw-r--r-- drivers/mtd/spi-nor/xilinx.c | 2
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 46
-rw-r--r-- drivers/net/bonding/bond_alb.c | 10
-rw-r--r-- drivers/net/bonding/bond_main.c | 139
-rw-r--r-- drivers/net/can/flexcan/flexcan-core.c | 10
-rw-r--r-- drivers/net/can/mscan/mpc5xxx_can.c | 2
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 18
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 2
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 21
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 3
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 83
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 1
-rw-r--r-- drivers/net/dsa/microchip/lan937x_main.c | 4
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 3
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 3
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 734
-rw-r--r-- drivers/net/dsa/ocelot/seville_vsc9953.c | 553
-rw-r--r-- drivers/net/dsa/qca/qca8k-8xxx.c | 2
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_devlink.c | 2
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.c | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 8
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 24
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Makefile | 1
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 53
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 12
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 32
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 23
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4
-rw-r--r-- drivers/net/ethernet/fungible/funeth/funeth_txrx.h | 4
-rw-r--r-- drivers/net/ethernet/google/gve/gve_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 12
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 2
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 4
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 39
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 11
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 20
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_adminq.c | 15
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 45
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 9
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 36
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fltr.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 54
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 72
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 57
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 81
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.h | 8
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 59
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 9
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 19
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_pci.c | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c | 128
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 3
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 8
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 2
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 14
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 42
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 62
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_net.c | 55
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 468
-rw-r--r-- drivers/net/ethernet/mscc/vsc7514_regs.c | 84
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 5
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 1
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 95
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_main.c | 4
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/siena/efx_channels.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/siena/tx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 4
-rw-r--r-- drivers/net/ethernet/wangxun/Kconfig | 6
-rw-r--r-- drivers/net/fddi/skfp/h/hwmtm.h | 2
-rw-r--r-- drivers/net/geneve.c | 15
-rw-r--r-- drivers/net/ieee802154/adf7242.c | 3
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 2
-rw-r--r-- drivers/net/ieee802154/cc2520.c | 1
-rw-r--r-- drivers/net/ipa/ipa_mem.c | 2
-rw-r--r-- drivers/net/ipa/ipa_qmi.c | 8
-rw-r--r-- drivers/net/ipa/ipa_qmi_msg.c | 8
-rw-r--r-- drivers/net/ipa/ipa_qmi_msg.h | 37
-rw-r--r-- drivers/net/ipa/ipa_reg.h | 2
-rw-r--r-- drivers/net/ipa/ipa_table.c | 2
-rw-r--r-- drivers/net/ipa/ipa_table.h | 3
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r-- drivers/net/ipvlan/ipvtap.c | 4
-rw-r--r-- drivers/net/macsec.c | 69
-rw-r--r-- drivers/net/mdio/fwnode_mdio.c | 4
-rw-r--r-- drivers/net/mdio/of_mdio.c | 1
-rw-r--r-- drivers/net/netdevsim/hwstats.c | 6
-rw-r--r-- drivers/net/netdevsim/netdev.c | 4
-rw-r--r-- drivers/net/phy/aquantia_main.c | 53
-rw-r--r-- drivers/net/phy/dp83867.c | 2
-rw-r--r-- drivers/net/phy/meson-gxl.c | 8
-rw-r--r-- drivers/net/phy/micrel.c | 26
-rw-r--r-- drivers/net/phy/microchip_t1.c | 58
-rw-r--r-- drivers/net/phy/phy-c45.c | 34
-rw-r--r-- drivers/net/phy/phy_device.c | 6
-rw-r--r-- drivers/net/plip/plip.c | 2
-rw-r--r-- drivers/net/tap.c | 20
-rw-r--r-- drivers/net/team/team.c | 24
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 26
-rw-r--r-- drivers/net/usb/cdc_ether.c | 7
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 3
-rw-r--r-- drivers/net/usb/r8152.c | 30
-rw-r--r-- drivers/net/veth.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 292
-rw-r--r-- drivers/net/vxlan/vxlan_core.c | 2
-rw-r--r-- drivers/net/wireguard/netlink.c | 13
-rw-r--r-- drivers/net/wireguard/selftest/ratelimiter.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath10k/trace.h | 14
-rw-r--r-- drivers/net/wireless/ath/ath11k/trace.h | 7
-rw-r--r-- drivers/net/wireless/ath/ath6kl/trace.h | 14
-rw-r--r-- drivers/net/wireless/ath/trace.h | 7
-rw-r--r-- drivers/net/wireless/ath/wil6210/trace.h | 7
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h | 12
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965-rs.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/Kconfig | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 4
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mac80211.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c | 2
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/hif.c | 2
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/hif.h | 3
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/netdev.h | 1
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/sdio.c | 39
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/wlan.c | 15
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 2
-rw-r--r-- drivers/nfc/pn533/uart.c | 1
-rw-r--r-- drivers/ntb/hw/epf/ntb_hw_epf.c | 48
-rw-r--r-- drivers/ntb/hw/idt/ntb_hw_idt.c | 6
-rw-r--r-- drivers/ntb/hw/intel/ntb_hw_gen1.c | 12
-rw-r--r-- drivers/ntb/hw/intel/ntb_hw_gen4.c | 2
-rw-r--r-- drivers/ntb/hw/intel/ntb_hw_intel.h | 7
-rw-r--r-- drivers/ntb/test/ntb_tool.c | 8
-rw-r--r-- drivers/nvdimm/pmem.c | 17
-rw-r--r-- drivers/nvdimm/region_devs.c | 28
-rw-r--r-- drivers/nvdimm/virtio_pmem.c | 9
-rw-r--r-- drivers/nvme/host/core.c | 3
-rw-r--r-- drivers/nvme/host/nvme.h | 2
-rw-r--r-- drivers/nvme/host/pci.c | 81
-rw-r--r-- drivers/nvme/target/rdma.c | 2
-rw-r--r-- drivers/of/address.c | 17
-rw-r--r-- drivers/of/fdt.c | 8
-rw-r--r-- drivers/opp/core.c | 1571
-rw-r--r-- drivers/opp/cpu.c | 12
-rw-r--r-- drivers/opp/debugfs.c | 27
-rw-r--r-- drivers/opp/of.c | 150
-rw-r--r-- drivers/opp/opp.h | 56
-rw-r--r-- drivers/opp/ti-opp-supply.c | 77
-rw-r--r-- drivers/parisc/ccio-dma.c | 14
-rw-r--r-- drivers/parisc/iosapic.c | 11
-rw-r--r-- drivers/parisc/lba_pci.c | 6
-rw-r--r-- drivers/parisc/led.c | 2
-rw-r--r-- drivers/pci/Kconfig | 8
-rw-r--r-- drivers/pci/Makefile | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 10
-rw-r--r-- drivers/pci/doe.c | 536
-rw-r--r-- drivers/pci/endpoint/functions/Kconfig | 12
-rw-r--r-- drivers/pci/endpoint/functions/Makefile | 1
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-vntb.c | 1442
-rw-r--r-- drivers/pci/p2pdma.c | 93
-rw-r--r-- drivers/pci/probe.c | 2
-rw-r--r-- drivers/peci/controller/peci-aspeed.c | 2
-rw-r--r-- drivers/peci/cpu.c | 3
-rw-r--r-- drivers/perf/arm-cmn.c | 2
-rw-r--r-- drivers/perf/arm_pmu_platform.c | 2
-rw-r--r-- drivers/perf/riscv_pmu.c | 1
-rw-r--r-- drivers/perf/riscv_pmu_legacy.c | 2
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 32
-rw-r--r-- drivers/pinctrl/Kconfig | 2
-rw-r--r-- drivers/pinctrl/aspeed/pinmux-aspeed.h | 2
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm2835.c | 21
-rw-r--r-- drivers/pinctrl/core.c | 2
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx93.c | 1
-rw-r--r-- drivers/pinctrl/intel/Kconfig | 8
-rw-r--r-- drivers/pinctrl/intel/Makefile | 1
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c | 18
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 16
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 14
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.h | 25
-rw-r--r-- drivers/pinctrl/intel/pinctrl-lynxpoint.c | 10
-rw-r--r-- drivers/pinctrl/intel/pinctrl-merrifield.c | 14
-rw-r--r-- drivers/pinctrl/intel/pinctrl-meteorlake.c | 417
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt8192.c | 296
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-mvebu.c | 10
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c | 242
-rw-r--r-- drivers/pinctrl/pinctrl-amd.h | 1376
-rw-r--r-- drivers/pinctrl/pinctrl-at91-pio4.c | 7
-rw-r--r-- drivers/pinctrl/pinctrl-at91.c | 10
-rw-r--r-- drivers/pinctrl/pinctrl-axp209.c | 14
-rw-r--r-- drivers/pinctrl/pinctrl-ingenic.c | 64
-rw-r--r-- drivers/pinctrl/pinctrl-ocelot.c | 115
-rw-r--r-- drivers/pinctrl/pinctrl-starfive.c | 5
-rw-r--r-- drivers/pinctrl/pinctrl-zynqmp.c | 11
-rw-r--r-- drivers/pinctrl/qcom/Kconfig | 19
-rw-r--r-- drivers/pinctrl/qcom/Makefile | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-lpass-lpi.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-lpass-lpi.h | 1
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8909.c | 956
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8916.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c | 1
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc8180x.c | 10
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm6375.c | 1544
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8250.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 3
-rw-r--r-- drivers/pinctrl/renesas/Kconfig | 18
-rw-r--r-- drivers/pinctrl/renesas/Makefile | 2
-rw-r--r-- drivers/pinctrl/renesas/core.c | 6
-rw-r--r-- drivers/pinctrl/renesas/pfc-r8a779f0.c | 2
-rw-r--r-- drivers/pinctrl/renesas/pfc-r8a779g0.c | 4262
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzg2l.c | 2
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzv2m.c | 1119
-rw-r--r-- drivers/pinctrl/renesas/sh_pfc.h | 9
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-exynos.c | 6
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-exynos.h | 3
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.c | 4
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.h | 8
-rw-r--r-- drivers/pinctrl/sunxi/Kconfig | 8
-rw-r--r-- drivers/pinctrl/sunxi/Makefile | 1
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c | 840
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c | 3
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c | 2
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c | 1
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c | 2
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c | 1
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c | 2
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c | 22
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c | 25
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c | 1
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c | 1
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sunxi.c | 156
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sunxi.h | 109
-rw-r--r-- drivers/platform/mellanox/mlxbf-tmfifo.c | 2
-rw-r--r-- drivers/platform/mellanox/mlxreg-lc.c | 53
-rw-r--r-- drivers/platform/surface/surface_aggregator_registry.c | 47
-rw-r--r-- drivers/platform/x86/acer-wmi.c | 9
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 11
-rw-r--r-- drivers/platform/x86/p2sb.c | 18
-rw-r--r-- drivers/platform/x86/pmc_atom.c | 2
-rw-r--r-- drivers/platform/x86/serial-multi-instantiate.c | 1
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 7
-rw-r--r-- drivers/platform/x86/x86-android-tablets.c | 14
-rw-r--r-- drivers/power/reset/Kconfig | 6
-rw-r--r-- drivers/power/reset/Makefile | 1
-rw-r--r-- drivers/power/reset/at91-reset.c | 184
-rw-r--r-- drivers/power/reset/pwr-mlxbf.c | 97
-rw-r--r-- drivers/power/supply/ab8500-chargalg.h | 4
-rw-r--r-- drivers/power/supply/ab8500_btemp.c | 1
-rw-r--r-- drivers/power/supply/ab8500_chargalg.c | 70
-rw-r--r-- drivers/power/supply/ab8500_charger.c | 48
-rw-r--r-- drivers/power/supply/ab8500_fg.c | 3
-rw-r--r-- drivers/power/supply/bq24257_charger.c | 2
-rw-r--r-- drivers/power/supply/cros_peripheral_charger.c | 2
-rw-r--r-- drivers/power/supply/goldfish_battery.c | 4
-rw-r--r-- drivers/power/supply/lp8788-charger.c | 2
-rw-r--r-- drivers/power/supply/max77976_charger.c | 4
-rw-r--r-- drivers/power/supply/olpc_battery.c | 5
-rw-r--r-- drivers/power/supply/pm2301_charger.h | 492
-rw-r--r-- drivers/power/supply/power_supply_core.c | 4
-rw-r--r-- drivers/regulator/core.c | 11
-rw-r--r-- drivers/regulator/pfuze100-regulator.c | 2
-rw-r--r-- drivers/remoteproc/imx_rproc.c | 7
-rw-r--r-- drivers/remoteproc/keystone_remoteproc.c | 3
-rw-r--r-- drivers/remoteproc/mtk_scp.c | 23
-rw-r--r-- drivers/remoteproc/omap_remoteproc.c | 6
-rw-r--r-- drivers/remoteproc/pru_rproc.c | 1
-rw-r--r-- drivers/remoteproc/qcom_common.c | 4
-rw-r--r-- drivers/remoteproc/qcom_q6v5.c | 4
-rw-r--r-- drivers/remoteproc/qcom_q6v5_adsp.c | 3
-rw-r--r-- drivers/remoteproc/qcom_q6v5_mss.c | 54
-rw-r--r-- drivers/remoteproc/qcom_q6v5_pas.c | 105
-rw-r--r-- drivers/remoteproc/qcom_sysmon.c | 16
-rw-r--r-- drivers/remoteproc/qcom_wcnss.c | 10
-rw-r--r-- drivers/remoteproc/remoteproc_core.c | 28
-rw-r--r-- drivers/remoteproc/remoteproc_virtio.c | 12
-rw-r--r-- drivers/remoteproc/ti_k3_r5_remoteproc.c | 2
-rw-r--r-- drivers/reset/Kconfig | 13
-rw-r--r-- drivers/reset/Makefile | 1
-rw-r--r-- drivers/reset/reset-tps380x.c | 126
-rw-r--r-- drivers/rpmsg/mtk_rpmsg.c | 2
-rw-r--r-- drivers/rpmsg/qcom_glink_native.c | 10
-rw-r--r-- drivers/rpmsg/qcom_glink_ssr.c | 2
-rw-r--r-- drivers/rpmsg/qcom_smd.c | 9
-rw-r--r-- drivers/rpmsg/rpmsg_char.c | 7
-rw-r--r-- drivers/rpmsg/rpmsg_core.c | 3
-rw-r--r-- drivers/rpmsg/rpmsg_internal.h | 4
-rw-r--r-- drivers/rtc/Kconfig | 41
-rw-r--r-- drivers/rtc/Makefile | 4
-rw-r--r-- drivers/rtc/class.c | 6
-rw-r--r-- drivers/rtc/dev.c | 8
-rw-r--r-- drivers/rtc/rtc-ab-b5ze-s3.c | 5
-rw-r--r-- drivers/rtc/rtc-ab-eoz9.c | 5
-rw-r--r-- drivers/rtc/rtc-bq32k.c | 5
-rw-r--r-- drivers/rtc/rtc-cmos.c | 3
-rw-r--r-- drivers/rtc/rtc-core.h | 5
-rw-r--r-- drivers/rtc/rtc-cros-ec.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1374.c | 5
-rw-r--r-- drivers/rtc/rtc-ds1672.c | 5
-rw-r--r-- drivers/rtc/rtc-ds3232.c | 5
-rw-r--r-- drivers/rtc/rtc-em3027.c | 5
-rw-r--r-- drivers/rtc/rtc-fm3130.c | 5
-rw-r--r-- drivers/rtc/rtc-hym8563.c | 5
-rw-r--r-- drivers/rtc/rtc-isl12022.c | 5
-rw-r--r-- drivers/rtc/rtc-isl1208.c | 10
-rw-r--r-- drivers/rtc/rtc-max6900.c | 5
-rw-r--r-- drivers/rtc/rtc-mc146818-lib.c | 8
-rw-r--r-- drivers/rtc/rtc-mpfs.c | 323
-rw-r--r-- drivers/rtc/rtc-nct3018y.c | 553
-rw-r--r-- drivers/rtc/rtc-pcf8523.c | 5
-rw-r--r-- drivers/rtc/rtc-pcf85363.c | 5
-rw-r--r-- drivers/rtc/rtc-pcf8563.c | 5
-rw-r--r-- drivers/rtc/rtc-pcf8583.c | 5
-rw-r--r-- drivers/rtc/rtc-rv3029c2.c | 5
-rw-r--r-- drivers/rtc/rtc-rv8803.c | 98
-rw-r--r-- drivers/rtc/rtc-rx6110.c | 5
-rw-r--r-- drivers/rtc/rtc-rx8025.c | 22
-rw-r--r-- drivers/rtc/rtc-rx8581.c | 5
-rw-r--r-- drivers/rtc/rtc-s35390a.c | 5
-rw-r--r-- drivers/rtc/rtc-sd3078.c | 5
-rw-r--r-- drivers/rtc/rtc-spear.c | 2
-rw-r--r-- drivers/rtc/rtc-sun6i.c | 2
-rw-r--r-- drivers/rtc/rtc-ti-k3.c | 680
-rw-r--r-- drivers/rtc/rtc-vr41xx.c | 363
-rw-r--r-- drivers/rtc/rtc-x1205.c | 5
-rw-r--r-- drivers/rtc/rtc-zynqmp.c | 115
-rw-r--r-- drivers/s390/char/Kconfig | 2
-rw-r--r-- drivers/s390/char/tape_34xx.c | 2
-rw-r--r-- drivers/s390/char/uvdevice.c | 5
-rw-r--r-- drivers/s390/char/zcore.c | 55
-rw-r--r-- drivers/s390/cio/vfio_ccw_async.c | 1
-rw-r--r-- drivers/s390/cio/vfio_ccw_cp.c | 205
-rw-r--r-- drivers/s390/cio/vfio_ccw_cp.h | 12
-rw-r--r-- drivers/s390/cio/vfio_ccw_drv.c | 58
-rw-r--r-- drivers/s390/cio/vfio_ccw_fsm.c | 99
-rw-r--r-- drivers/s390/cio/vfio_ccw_ops.c | 114
-rw-r--r-- drivers/s390/cio/vfio_ccw_private.h | 13
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 34
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 4
-rw-r--r-- drivers/s390/crypto/ap_queue.c | 2
-rw-r--r-- drivers/s390/crypto/pkey_api.c | 2
-rw-r--r-- drivers/s390/crypto/vfio_ap_drv.c | 124
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 1544
-rw-r--r-- drivers/s390/crypto/vfio_ap_private.h | 54
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 168
-rw-r--r-- drivers/s390/net/qeth_ethtool.c | 12
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 29
-rw-r--r-- drivers/s390/scsi/zfcp_fc.h | 6
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 4
-rw-r--r-- drivers/s390/virtio/virtio_ccw.c | 3
-rw-r--r-- drivers/scsi/FlashPoint.c | 4
-rw-r--r-- drivers/scsi/cxlflash/ocxl_hw.c | 1
-rw-r--r-- drivers/scsi/hosts.c | 21
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 4
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 8
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 4
-rw-r--r-- drivers/scsi/mesh.c | 7
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 2
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 5
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 14
-rw-r--r-- drivers/scsi/scsi_lib.c | 57
-rw-r--r-- drivers/scsi/scsi_priv.h | 2
-rw-r--r-- drivers/scsi/scsi_scan.c | 1
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 1
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 6
-rw-r--r-- drivers/scsi/sd.c | 91
-rw-r--r-- drivers/scsi/sd.h | 5
-rw-r--r-- drivers/scsi/storvsc_drv.c | 2
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/pm-arm.c | 50
-rw-r--r-- drivers/soc/fsl/Kconfig | 1
-rw-r--r-- drivers/soc/imx/gpcv2.c | 5
-rw-r--r-- drivers/soc/imx/imx8m-blk-ctrl.c | 1
-rw-r--r-- drivers/soc/qcom/qcom-geni-se.c | 3
-rw-r--r-- drivers/soc/tegra/common.c | 49
-rw-r--r-- drivers/soc/tegra/pmc.c | 4
-rw-r--r-- drivers/soundwire/intel.c | 16
-rw-r--r-- drivers/soundwire/qcom.c | 10
-rw-r--r-- drivers/spi/spi-bitbang-txrx.h | 6
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 38
-rw-r--r-- drivers/spi/spi-meson-spicc.c | 129
-rw-r--r-- drivers/spi/spi-mpc52xx.c | 2
-rw-r--r-- drivers/spi/spi-mux.c | 1
-rw-r--r-- drivers/spi/spi.c | 19
-rw-r--r-- drivers/staging/r8188eu/os_dep/os_intfs.c | 1
-rw-r--r-- drivers/staging/r8188eu/os_dep/usb_intf.c | 1
-rw-r--r-- drivers/staging/rtl8712/rtl8712_cmd.c | 36
-rw-r--r-- drivers/target/target_core_alua.c | 3
-rw-r--r-- drivers/target/target_core_device.c | 32
-rw-r--r-- drivers/target/target_core_pr.c | 28
-rw-r--r-- drivers/target/target_core_stat.c | 10
-rw-r--r-- drivers/target/target_core_xcopy.c | 2
-rw-r--r-- drivers/tee/tee_shm.c | 4
-rw-r--r-- drivers/thermal/Kconfig | 4
-rw-r--r-- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 9
-rw-r--r-- drivers/thermal/intel/intel_tcc_cooling.c | 2
-rw-r--r-- drivers/thermal/thermal_core.c | 1
-rw-r--r-- drivers/thermal/thermal_sysfs.c | 10
-rw-r--r-- drivers/thunderbolt/Kconfig | 3
-rw-r--r-- drivers/thunderbolt/ctl.c | 2
-rw-r--r-- drivers/thunderbolt/icm.c | 1
-rw-r--r-- drivers/thunderbolt/nhi.h | 1
-rw-r--r-- drivers/thunderbolt/switch.c | 6
-rw-r--r-- drivers/tty/amiserial.c | 20
-rw-r--r-- drivers/tty/mips_ejtag_fdc.c | 4
-rw-r--r-- drivers/tty/n_gsm.c | 786
-rw-r--r-- drivers/tty/n_tty.c | 92
-rw-r--r-- drivers/tty/serial/8250/8250.h | 24
-rw-r--r-- drivers/tty/serial/8250/8250_bcm2835aux.c | 7
-rw-r--r-- drivers/tty/serial/8250/8250_bcm7271.c | 24
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 8
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 68
-rw-r--r-- drivers/tty/serial/8250/8250_dwlib.c | 152
-rw-r--r-- drivers/tty/serial/8250/8250_early.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250_exar.c | 25
-rw-r--r-- drivers/tty/serial/8250/8250_fintek.c | 31
-rw-r--r-- drivers/tty/serial/8250/8250_fsl.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250_ingenic.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_lpc18xx.c | 28
-rw-r--r-- drivers/tty/serial/8250/8250_lpss.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_of.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 7
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 135
-rw-r--r-- drivers/tty/serial/8250/8250_pericom.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 157
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 1
-rw-r--r-- drivers/tty/serial/Kconfig | 18
-rw-r--r-- drivers/tty/serial/Makefile | 1
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 15
-rw-r--r-- drivers/tty/serial/ar933x_uart.c | 27
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 107
-rw-r--r-- drivers/tty/serial/earlycon.c | 3
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 72
-rw-r--r-- drivers/tty/serial/imx.c | 21
-rw-r--r-- drivers/tty/serial/kgdboc.c | 2
-rw-r--r-- drivers/tty/serial/max310x.c | 272
-rw-r--r-- drivers/tty/serial/mcf.c | 10
-rw-r--r-- drivers/tty/serial/meson_uart.c | 2
-rw-r--r-- drivers/tty/serial/mpc52xx_uart.c | 4
-rw-r--r-- drivers/tty/serial/msm_serial.c | 550
-rw-r--r-- drivers/tty/serial/mux.c | 6
-rw-r--r-- drivers/tty/serial/mvebu-uart.c | 11
-rw-r--r-- drivers/tty/serial/omap-serial.c | 18
-rw-r--r-- drivers/tty/serial/owl-uart.c | 2
-rw-r--r-- drivers/tty/serial/pch_uart.c | 7
-rw-r--r-- drivers/tty/serial/pic32_uart.c | 4
-rw-r--r-- drivers/tty/serial/pmac_zilog.c | 1
-rw-r--r-- drivers/tty/serial/pxa.c | 5
-rw-r--r-- drivers/tty/serial/qcom_geni_serial.c | 91
-rw-r--r-- drivers/tty/serial/rda-uart.c | 2
-rw-r--r-- drivers/tty/serial/samsung_tty.c | 90
-rw-r--r-- drivers/tty/serial/sc16is7xx.c | 10
-rw-r--r-- drivers/tty/serial/serial-tegra.c | 5
-rw-r--r-- drivers/tty/serial/serial_core.c | 452
-rw-r--r-- drivers/tty/serial/serial_mctrl_gpio.c | 48
-rw-r--r-- drivers/tty/serial/sifive.c | 10
-rw-r--r-- drivers/tty/serial/st-asc.c | 1
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 79
-rw-r--r-- drivers/tty/serial/stm32-usart.h | 68
-rw-r--r-- drivers/tty/serial/sunsu.c | 4
-rw-r--r-- drivers/tty/serial/ucc_uart.c | 2
-rw-r--r-- drivers/tty/serial/vr41xx_siu.c | 934
-rw-r--r-- drivers/tty/tty_buffer.c | 63
-rw-r--r-- drivers/tty/tty_io.c | 2
-rw-r--r-- drivers/tty/tty_ioctl.c | 4
-rw-r--r-- drivers/tty/tty_port.c | 21
-rw-r--r-- drivers/tty/vt/Makefile | 2
-rw-r--r-- drivers/tty/vt/consolemap.c | 684
-rw-r--r-- drivers/tty/vt/defkeymap.c_shipped | 6
-rw-r--r-- drivers/tty/vt/selection.c | 3
-rw-r--r-- drivers/tty/vt/vt.c | 28
-rw-r--r-- drivers/ufs/core/ufshcd.c | 16
-rw-r--r-- drivers/ufs/host/ufs-exynos.c | 2
-rw-r--r-- drivers/ufs/host/ufshcd-pci.c | 17
-rw-r--r-- drivers/usb/cdns3/cdns3-gadget.c | 4
-rw-r--r-- drivers/usb/chipidea/trace.h | 4
-rw-r--r-- drivers/usb/class/cdc-acm.c | 3
-rw-r--r-- drivers/usb/core/hcd.c | 17
-rw-r--r-- drivers/usb/core/hub.c | 10
-rw-r--r-- drivers/usb/dwc2/platform.c | 8
-rw-r--r-- drivers/usb/dwc3/core.c | 37
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r-- drivers/usb/dwc3/dwc3-qcom.c | 96
-rw-r--r-- drivers/usb/dwc3/gadget.c | 8
-rw-r--r-- drivers/usb/dwc3/host.c | 11
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 16
-rw-r--r-- drivers/usb/gadget/function/storage_common.c | 6
-rw-r--r-- drivers/usb/gadget/udc/core.c | 26
-rw-r--r-- drivers/usb/host/ohci-sa1111.c | 25
-rw-r--r-- drivers/usb/host/xhci-hub.c | 13
-rw-r--r-- drivers/usb/host/xhci-mtk-sch.c | 15
-rw-r--r-- drivers/usb/host/xhci-plat.c | 11
-rw-r--r-- drivers/usb/host/xhci-trace.h | 4
-rw-r--r-- drivers/usb/host/xhci.c | 19
-rw-r--r-- drivers/usb/host/xhci.h | 4
-rw-r--r-- drivers/usb/misc/onboard_usb_hub.c | 5
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb_con.c | 2
-rw-r--r-- drivers/usb/mtu3/mtu3_trace.h | 6
-rw-r--r-- drivers/usb/musb/Kconfig | 2
-rw-r--r-- drivers/usb/musb/musb_trace.h | 4
-rw-r--r-- drivers/usb/serial/ch341.c | 16
-rw-r--r-- drivers/usb/serial/cp210x.c | 1
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r-- drivers/usb/serial/option.c | 21
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 7
-rw-r--r-- drivers/usb/storage/unusual_uas.h | 7
-rw-r--r-- drivers/usb/typec/Kconfig | 1
-rw-r--r-- drivers/usb/typec/altmodes/displayport.c | 4
-rw-r--r-- drivers/usb/typec/class.c | 1
-rw-r--r-- drivers/usb/typec/mux/intel_pmc_mux.c | 9
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 7
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 53
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.c | 14
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.h | 2
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_main.c | 144
-rw-r--r-- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 11
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 173
-rw-r--r-- drivers/vdpa/vdpa.c | 14
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 18
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.h | 1
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 176
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 3
-rw-r--r-- drivers/vdpa/vdpa_user/iova_domain.c | 102
-rw-r--r-- drivers/vdpa/vdpa_user/iova_domain.h | 8
-rw-r--r-- drivers/vdpa/vdpa_user/vduse_dev.c | 180
-rw-r--r-- drivers/vfio/Makefile | 2
-rw-r--r-- drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 2
-rw-r--r-- drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 11
-rw-r--r-- drivers/vfio/pci/mlx5/cmd.c | 14
-rw-r--r-- drivers/vfio/pci/mlx5/cmd.h | 4
-rw-r--r-- drivers/vfio/pci/mlx5/main.c | 11
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 4
-rw-r--r-- drivers/vfio/pci/vfio_pci_core.c | 7
-rw-r--r-- drivers/vfio/pci/vfio_pci_zdev.c | 8
-rw-r--r-- drivers/vfio/platform/vfio_platform_private.h | 21
-rw-r--r-- drivers/vfio/vfio.h | 17
-rw-r--r-- drivers/vfio/vfio_iommu_spapr_tce.c | 14
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 209
-rw-r--r-- drivers/vfio/vfio_main.c (renamed from drivers/vfio/vfio.c) | 192
-rw-r--r-- drivers/vhost/scsi.c | 89
-rw-r--r-- drivers/vhost/vdpa.c | 38
-rw-r--r-- drivers/vhost/vringh.c | 78
-rw-r--r-- drivers/video/backlight/lp855x_bl.c | 21
-rw-r--r-- drivers/video/backlight/platform_lcd.c | 10
-rw-r--r--drivers/video/backlight/rt4831-backlight.c33
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/console/vgacon.c12
-rw-r--r--drivers/video/fbdev/68328fb.c7
-rw-r--r--drivers/video/fbdev/amba-clcd.c24
-rw-r--r--drivers/video/fbdev/amifb.c15
-rw-r--r--drivers/video/fbdev/arkfb.c9
-rw-r--r--drivers/video/fbdev/atafb.c103
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c48
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c1
-rw-r--r--drivers/video/fbdev/cirrusfb.c4
-rw-r--r--drivers/video/fbdev/clps711x-fb.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c37
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/cyber2000fb.c8
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/fm2fb.c4
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c6
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hpfb.c4
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/i740fb.c11
-rw-r--r--drivers/video/fbdev/imxfb.c136
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c6
-rw-r--r--drivers/video/fbdev/offb.c1
-rw-r--r--drivers/video/fbdev/omap/hwa742.c3
-rw-r--r--drivers/video/fbdev/omap/omapfb.h9
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c9
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c41
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/sis/init.c4
-rw-r--r--drivers/video/fbdev/sis/sis_main.c278
-rw-r--r--drivers/video/fbdev/skeletonfb.c6
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/sunxvr1000.c2
-rw-r--r--drivers/video/fbdev/sunxvr2500.c2
-rw-r--r--drivers/video/fbdev/sunxvr500.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c2
-rw-r--r--drivers/video/fbdev/valkyriefb.c10
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig2
-rw-r--r--drivers/virtio/Kconfig11
-rw-r--r--drivers/virtio/virtio.c4
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_mem.c6
-rw-r--r--drivers/virtio/virtio_mmio.c5
-rw-r--r--drivers/virtio/virtio_pci_common.c12
-rw-r--r--drivers/virtio/virtio_pci_legacy.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c136
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c39
-rw-r--r--drivers/virtio/virtio_ring.c778
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm7038_wdt.c8
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/dw_wdt.c8
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/max77620_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c10
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c41
-rw-r--r--drivers/watchdog/pseries-wdt.c239
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/sama5d4_wdt.c8
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/st_lpc_wdt.c9
-rw-r--r--drivers/watchdog/tegra_wdt.c14
-rw-r--r--drivers/watchdog/wdat_wdt.c7
-rw-r--r--drivers/xen/events/events_base.c53
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--drivers/xen/privcmd.c21
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c9
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
1379 files changed, 46535 insertions, 17915 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index fdc6b593f500..c4d54a5326b1 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -131,7 +131,7 @@ static void vc_refresh(struct vc_data *vc)
for (i = 0; i < WIDTH; i++) {
u16 glyph = screen_glyph(vc,
2 * (vc_x + i) + vc_y * vc->vc_size_row);
- buf[i] = inverse_translate(vc, glyph, 1);
+ buf[i] = inverse_translate(vc, glyph, true);
}
braille_write(buf);
}
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index d726537fa16c..f52265293482 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -470,7 +470,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
c |= 0x100;
}
- ch = inverse_translate(vc, c, 1);
+ ch = inverse_translate(vc, c, true);
*attribs = (w & 0xff00) >> 8;
}
return ch;
diff --git a/drivers/accessibility/speakup/serialio.h b/drivers/accessibility/speakup/serialio.h
index 6f8f86f161bb..b4f9a1925b81 100644
--- a/drivers/accessibility/speakup/serialio.h
+++ b/drivers/accessibility/speakup/serialio.h
@@ -33,9 +33,8 @@ struct old_serial_port {
#define NUM_DISABLE_TIMEOUTS 3
/* buffer timeout in ms */
#define SPK_TIMEOUT 100
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#define spk_serial_tx_busy() \
- ((inb(speakup_info.port_tts + UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ (!uart_lsr_tx_empty(inb(speakup_info.port_tts + UART_LSR)))
#endif
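
Note on the speakup hunk above: the open-coded BOTH_EMPTY test is replaced by the generic uart_lsr_tx_empty() helper. A standalone sketch of the semantics, assuming the helper mirrors the macro being deleted (the LSR bit values follow the 16550 convention):

#include <stdbool.h>
#include <stdio.h>

#define UART_LSR_THRE		0x20	/* transmit-hold register empty */
#define UART_LSR_TEMT		0x40	/* transmitter empty */
#define UART_LSR_BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

/* Assumed model of uart_lsr_tx_empty(): the TX path is idle only when
 * both the holding register and the shift register have drained. */
static bool uart_lsr_tx_empty(unsigned short lsr)
{
	return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
}

int main(void)
{
	printf("%d\n", uart_lsr_tx_empty(UART_LSR_TEMT | UART_LSR_THRE)); /* 1 */
	printf("%d\n", uart_lsr_tx_empty(UART_LSR_THRE)); /* 0: still shifting */
	return 0;
}

spk_serial_tx_busy() is simply the negation of this test, which is why the local BOTH_EMPTY macro can go.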
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index f2f8f05662de..ca2aed86b540 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -788,6 +788,294 @@ void acpi_configure_pmsi_domain(struct device *dev)
}
#ifdef CONFIG_IOMMU_API
+static void iort_rmr_free(struct device *dev,
+ struct iommu_resv_region *region)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+
+ rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
+ kfree(rmr_data->sids);
+ kfree(rmr_data);
+}
+
+static struct iommu_iort_rmr_data *iort_rmr_alloc(
+ struct acpi_iort_rmr_desc *rmr_desc,
+ int prot, enum iommu_resv_type type,
+ u32 *sids, u32 num_sids)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+ struct iommu_resv_region *region;
+ u32 *sids_copy;
+ u64 addr = rmr_desc->base_address, size = rmr_desc->length;
+
+ rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
+ if (!rmr_data)
+ return NULL;
+
+ /* Create a copy of SIDs array to associate with this rmr_data */
+ sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
+ if (!sids_copy) {
+ kfree(rmr_data);
+ return NULL;
+ }
+ rmr_data->sids = sids_copy;
+ rmr_data->num_sids = num_sids;
+
+ if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
+ /* PAGE align base addr and size */
+ addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
+
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
+ rmr_desc->base_address,
+ rmr_desc->base_address + rmr_desc->length - 1,
+ addr, addr + size - 1);
+ }
+
+ region = &rmr_data->rr;
+ INIT_LIST_HEAD(&region->list);
+ region->start = addr;
+ region->length = size;
+ region->prot = prot;
+ region->type = type;
+ region->free = iort_rmr_free;
+
+ return rmr_data;
+}
+
+static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
+ u32 count)
+{
+ int i, j;
+
+ for (i = 0; i < count; i++) {
+ u64 end, start = desc[i].base_address, length = desc[i].length;
+
+ if (!length) {
+ pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
+ start);
+ continue;
+ }
+
+ end = start + length - 1;
+
+ /* Check for address overlap */
+ for (j = i + 1; j < count; j++) {
+ u64 e_start = desc[j].base_address;
+ u64 e_end = e_start + desc[j].length - 1;
+
+ if (start <= e_end && end >= e_start)
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
+ start, end);
+ }
+ }
+}
+
+/*
+ * Please note, we will keep the already allocated RMR reserve
+ * regions in case of a memory allocation failure.
+ */
+static void iort_get_rmrs(struct acpi_iort_node *node,
+ struct acpi_iort_node *smmu,
+ u32 *sids, u32 num_sids,
+ struct list_head *head)
+{
+ struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
+ struct acpi_iort_rmr_desc *rmr_desc;
+ int i;
+
+ rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
+ rmr->rmr_offset);
+
+ iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
+
+ for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
+ struct iommu_iort_rmr_data *rmr_data;
+ enum iommu_resv_type type;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
+ type = IOMMU_RESV_DIRECT_RELAXABLE;
+ else
+ type = IOMMU_RESV_DIRECT;
+
+ if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
+ prot |= IOMMU_PRIV;
+
+		/* Attributes 0x00 - 0x03 represent device memory */
+ if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
+ ACPI_IORT_RMR_ATTR_DEVICE_GRE)
+ prot |= IOMMU_MMIO;
+ else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
+ ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
+ prot |= IOMMU_CACHE;
+
+ rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
+ sids, num_sids);
+ if (!rmr_data)
+ return;
+
+ list_add_tail(&rmr_data->rr.list, head);
+ }
+}
+
+static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
+ u32 new_count)
+{
+ u32 *new_sids;
+ u32 total_count = count + new_count;
+ int i;
+
+ new_sids = krealloc_array(sids, count + new_count,
+ sizeof(*new_sids), GFP_KERNEL);
+ if (!new_sids)
+ return NULL;
+
+ for (i = count; i < total_count; i++)
+ new_sids[i] = id_start++;
+
+ return new_sids;
+}
+
+static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
+ u32 id_count)
+{
+ int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ /*
+ * Make sure the kernel has preserved the boot firmware PCIe
+ * configuration. This is required to ensure that the RMR PCIe
+ * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
+ */
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+
+ if (!host->preserve_config)
+ return false;
+ }
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ if (fwspec->ids[i] >= id_start &&
+ fwspec->ids[i] <= id_start + id_count)
+ return true;
+ }
+
+ return false;
+}
+
+static void iort_node_get_rmr_info(struct acpi_iort_node *node,
+ struct acpi_iort_node *iommu,
+ struct device *dev, struct list_head *head)
+{
+ struct acpi_iort_node *smmu = NULL;
+ struct acpi_iort_rmr *rmr;
+ struct acpi_iort_id_mapping *map;
+ u32 *sids = NULL;
+ u32 num_sids = 0;
+ int i;
+
+ if (!node->mapping_offset || !node->mapping_count) {
+ pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
+ node);
+ return;
+ }
+
+ rmr = (struct acpi_iort_rmr *)node->node_data;
+ if (!rmr->rmr_offset || !rmr->rmr_count)
+ return;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset);
+
+ /*
+ * Go through the ID mappings and see if we have a match for SMMU
+	 * and dev (if not NULL). If found, get the SIDs for the node.
+ * Please note, id_count is equal to the number of IDs in the
+ * range minus one.
+ */
+ for (i = 0; i < node->mapping_count; i++, map++) {
+ struct acpi_iort_node *parent;
+
+ if (!map->id_count)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+ if (parent != iommu)
+ continue;
+
+ /* If dev is valid, check RMR node corresponds to the dev SID */
+ if (dev && !iort_rmr_has_dev(dev, map->output_base,
+ map->id_count))
+ continue;
+
+ /* Retrieve SIDs associated with the Node. */
+ sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
+ map->id_count + 1);
+ if (!sids)
+ return;
+
+ num_sids += map->id_count + 1;
+ }
+
+ if (!sids)
+ return;
+
+ iort_get_rmrs(node, smmu, sids, num_sids, head);
+ kfree(sids);
+}
+
+static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_table_iort *iort;
+ struct acpi_iort_node *iort_node, *iort_end;
+ int i;
+
+ /* Only supports ARM DEN 0049E.d onwards */
+ if (iort_table->revision < 5)
+ return;
+
+ iort = (struct acpi_table_iort *)iort_table;
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort->node_offset);
+ iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort_table->length);
+
+ for (i = 0; i < iort->node_count; i++) {
+ if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
+ "IORT node pointer overflows, bad table!\n"))
+ return;
+
+ if (iort_node->type == ACPI_IORT_NODE_RMR)
+ iort_node_get_rmr_info(iort_node, iommu, dev, head);
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+ iort_node->length);
+ }
+}
+
+/*
+ * Populate the RMR list associated with a given IOMMU and dev (if provided).
+ * If dev is NULL, the function populates all the RMRs associated with the
+ * given IOMMU.
+ */
+static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
+ struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_iort_node *iommu;
+
+ iommu = iort_get_iort_node(iommu_fwnode);
+ if (!iommu)
+ return;
+
+ iort_find_rmrs(iommu, dev, head);
+}
+
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
struct acpi_iort_node *iommu;
@@ -806,27 +1094,22 @@ static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
return NULL;
}
-/**
- * iort_iommu_msi_get_resv_regions - Reserved region driver helper
- * @dev: Device from iommu_get_resv_regions()
- * @head: Reserved region list from iommu_get_resv_regions()
- *
- * Returns: Number of msi reserved regions on success (0 if platform
- * doesn't require the reservation or no associated msi regions),
- * appropriate error value otherwise. The ITS interrupt translation
- * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
- * are the msi reserved regions.
+/*
+ * Retrieve platform specific HW MSI reserve regions.
+ * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
+ * associated with the device are the HW MSI reserved regions.
*/
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+static void iort_iommu_msi_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct acpi_iort_its_group *its;
struct acpi_iort_node *iommu_node, *its_node = NULL;
- int i, resv = 0;
+ int i;
iommu_node = iort_get_msi_resv_iommu(dev);
if (!iommu_node)
- return 0;
+ return;
/*
* Current logic to reserve ITS regions relies on HW topologies
@@ -846,7 +1129,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
}
if (!its_node)
- return 0;
+ return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)its_node->node_data;
@@ -860,15 +1143,52 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
prot, IOMMU_RESV_MSI);
- if (region) {
+ if (region)
list_add_tail(&region->list, head);
- resv++;
- }
}
}
+}
+
+/**
+ * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
+ * @dev: Device from iommu_get_resv_regions()
+ * @head: Reserved region list from iommu_get_resv_regions()
+ */
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ iort_iommu_msi_get_resv_regions(dev, head);
+ iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
+}
+
+/**
+ * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
+ * associated StreamIDs information.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
+}
+EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
+
+/**
+ * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
- return (resv == its->its_count) ? resv : -ENODEV;
+ list_for_each_entry_safe(entry, next, head, list)
+ entry->free(NULL, entry);
}
+EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
static inline bool iort_iommu_driver_enabled(u8 type)
{
@@ -1034,8 +1354,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
}
#else
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
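
Note on the RMR plumbing added above: each IORT ID mapping contributes a contiguous StreamID range starting at output_base, and the table encodes id_count as the number of IDs minus one, hence the map->id_count + 1 passed to iort_rmr_alloc_sids(). A minimal userspace model of that expansion (realloc() standing in for krealloc_array(); all values invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Grow the SID array and append the contiguous range starting at
 * id_start, as iort_rmr_alloc_sids() does in the patch above. */
static uint32_t *rmr_alloc_sids(uint32_t *sids, uint32_t count,
				uint32_t id_start, uint32_t new_count)
{
	uint32_t total = count + new_count;
	uint32_t *new_sids = realloc(sids, total * sizeof(*new_sids));

	if (!new_sids)
		return NULL;
	for (uint32_t i = count; i < total; i++)
		new_sids[i] = id_start++;
	return new_sids;
}

int main(void)
{
	/* Two mappings: output_base 0x100 with id_count 3, then 0x800 with 0. */
	uint32_t *sids = rmr_alloc_sids(NULL, 0, 0x100, 3 + 1);

	sids = rmr_alloc_sids(sids, 4, 0x800, 0 + 1);
	if (!sids)
		return 1;
	for (int i = 0; i < 5; i++)
		printf("SID[%d] = 0x%x\n", i, sids[i]);
	free(sids);
	return 0;
}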
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index db6ac540e924..e534fd49a67e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e764f9ac9cf8..d4c168ce428c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -55,14 +55,19 @@ static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
+static const guid_t buffer_prop_guid =
+ GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
+ 0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
+
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent);
-static bool acpi_extract_properties(const union acpi_object *desc,
+static bool acpi_extract_properties(acpi_handle handle,
+ union acpi_object *desc,
struct acpi_device_data *data);
-static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+static bool acpi_nondev_subnode_extract(union acpi_object *desc,
acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
@@ -81,7 +86,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(desc, &dn->data);
+ result = acpi_extract_properties(handle, desc, &dn->data);
if (handle) {
acpi_handle scope;
@@ -155,16 +160,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
-static int acpi_add_nondev_subnodes(acpi_handle scope,
- const union acpi_object *links,
- struct list_head *list,
- struct fwnode_handle *parent)
+static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ union acpi_object *links,
+ struct list_head *list,
+ struct fwnode_handle *parent)
{
bool ret = false;
int i;
for (i = 0; i < links->package.count; i++) {
- const union acpi_object *link, *desc;
+ union acpi_object *link, *desc;
acpi_handle handle;
bool result;
@@ -204,7 +209,7 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
}
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent)
{
@@ -212,7 +217,8 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
/* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *links;
+ const union acpi_object *guid;
+ union acpi_object *links;
guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
@@ -325,7 +331,7 @@ static bool acpi_is_property_guid(const guid_t *guid)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties)
+ union acpi_object *properties)
{
struct acpi_device_properties *props;
@@ -340,7 +346,141 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
return props;
}
-static bool acpi_extract_properties(const union acpi_object *desc,
+static void acpi_nondev_subnode_tag(acpi_handle handle, void *context)
+{
+}
+
+static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+
+ acpi_untie_nondev_subnodes(&dn->data);
+ }
+}
+
+static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_status status;
+ bool ret;
+
+ status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ acpi_handle_err(dn->handle, "Can't tag data node\n");
+ return false;
+ }
+
+ ret = acpi_tie_nondev_subnodes(&dn->data);
+ if (!ret)
+ return ret;
+ }
+
+ return true;
+}
+
+static void acpi_data_add_buffer_props(acpi_handle handle,
+ struct acpi_device_data *data,
+ union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+ union acpi_object *package;
+ size_t alloc_size;
+ unsigned int i;
+ u32 *count;
+
+ if (check_mul_overflow((size_t)properties->package.count,
+ sizeof(*package) + sizeof(void *),
+ &alloc_size) ||
+ check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size,
+ &alloc_size)) {
+ acpi_handle_warn(handle,
+ "can't allocate memory for %u buffer props",
+ properties->package.count);
+ return;
+ }
+
+ props = kvzalloc(alloc_size, GFP_KERNEL);
+ if (!props)
+ return;
+
+ props->guid = &buffer_prop_guid;
+ props->bufs = (void *)(props + 1);
+ props->properties = (void *)(props->bufs + properties->package.count);
+
+ /* Outer package */
+ package = props->properties;
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = package + 1;
+ count = &package->package.count;
+ *count = 0;
+
+ /* Inner packages */
+ package++;
+
+ for (i = 0; i < properties->package.count; i++) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object *property = &properties->package.elements[i];
+ union acpi_object *prop, *obj, *buf_obj;
+ acpi_status status;
+
+ if (property->type != ACPI_TYPE_PACKAGE ||
+ property->package.count != 2) {
+ acpi_handle_warn(handle,
+ "buffer property %u has %u entries\n",
+ i, property->package.count);
+ continue;
+ }
+
+ prop = &property->package.elements[0];
+ obj = &property->package.elements[1];
+
+ if (prop->type != ACPI_TYPE_STRING ||
+ obj->type != ACPI_TYPE_STRING) {
+ acpi_handle_warn(handle,
+ "wrong object types %u and %u\n",
+ prop->type, obj->type);
+ continue;
+ }
+
+ status = acpi_evaluate_object_typed(handle, obj->string.pointer,
+ NULL, &buf,
+ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle,
+ "can't evaluate \"%*pE\" as buffer\n",
+ obj->string.length,
+ obj->string.pointer);
+ continue;
+ }
+
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = prop;
+ package->package.count = 2;
+
+ buf_obj = buf.pointer;
+
+ /* Replace the string object with a buffer object */
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = buf_obj->buffer.length;
+ obj->buffer.pointer = buf_obj->buffer.pointer;
+
+ props->bufs[i] = buf.pointer;
+ package++;
+ (*count)++;
+ }
+
+ if (*count)
+ list_add(&props->list, &data->properties);
+ else
+ kvfree(props);
+}
+
+static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc,
struct acpi_device_data *data)
{
int i;
@@ -350,7 +490,8 @@ static bool acpi_extract_properties(const union acpi_object *desc,
/* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *properties;
+ const union acpi_object *guid;
+ union acpi_object *properties;
guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
@@ -364,6 +505,12 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
+ if (guid_equal((guid_t *)guid->buffer.pointer,
+ &buffer_prop_guid)) {
+ acpi_data_add_buffer_props(scope, data, properties);
+ continue;
+ }
+
if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
@@ -410,7 +557,7 @@ void acpi_init_properties(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
- if (acpi_extract_properties(buf.pointer, &adev->data)) {
+ if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) {
adev->data.pointer = buf.pointer;
if (acpi_of)
acpi_init_of_compatible(adev);
@@ -422,6 +569,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (!adev->data.pointer) {
acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n");
ACPI_FREE(buf.pointer);
+ } else {
+ if (!acpi_tie_nondev_subnodes(&adev->data))
+ acpi_untie_nondev_subnodes(&adev->data);
}
out:
@@ -438,8 +588,14 @@ static void acpi_free_device_properties(struct list_head *list)
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
+ u32 i;
+
list_del(&props->list);
- kfree(props);
+ /* Buffer data properties were separately allocated */
+ if (props->bufs)
+ for (i = 0; i < props->properties->package.count; i++)
+ ACPI_FREE(props->bufs[i]);
+ kvfree(props);
}
}
@@ -462,6 +618,7 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ acpi_untie_nondev_subnodes(&adev->data);
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
@@ -633,6 +790,58 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static int acpi_get_ref_args(struct fwnode_reference_args *args,
+ struct fwnode_handle *ref_fwnode,
+ const union acpi_object **element,
+ const union acpi_object *end, size_t num_args)
+{
+ u32 nargs = 0, i;
+
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
+ (*element)++) {
+ const char *child_name = (*element)->string.pointer;
+
+ ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
+ /*
+ * Assume the following integer elements are all args. Stop counting on
+	 * the first reference or end of the package arguments. If an element
+	 * is neither a reference nor an integer, return an error; we can't
+	 * parse it.
+ */
+ for (i = 0; (*element) + i < end && i < num_args; i++) {
+ acpi_object_type type = (*element)[i].type;
+
+ if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else
+ return -EINVAL;
+ }
+
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
+ if (args) {
+ args->fwnode = ref_fwnode;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = (*element)[i].integer.value;
+ }
+
+ (*element) += nargs;
+
+ return 0;
+}
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -686,11 +895,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -EINVAL ? -ENOENT : -EINVAL;
- /*
- * The simplest case is when the value is a single reference. Just
- * return that reference then.
- */
- if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ switch (obj->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /* Plain single reference without arguments. */
if (index)
return -ENOENT;
@@ -701,19 +908,21 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * If it is not a single reference, then it is a package of
+ * references followed by number of ints as follows:
+ *
+ * Package () { REF, INT, REF, INT, INT }
+ *
+ * The index argument is then used to determine which reference
+ * the caller wants (along with the arguments).
+ */
+ break;
+ default:
+ return -EINVAL;
}
- /*
- * If it is not a single reference, then it is a package of
- * references followed by number of ints as follows:
- *
- * Package () { REF, INT, REF, INT, INT }
- *
- * The index argument is then used to determine which reference
- * the caller wants (along with the arguments).
- */
- if (obj->type != ACPI_TYPE_PACKAGE)
- return -EINVAL;
if (index >= obj->package.count)
return -ENOENT;
@@ -721,66 +930,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
end = element + obj->package.count;
while (element < end) {
- u32 nargs, i;
-
- if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
- struct fwnode_handle *ref_fwnode;
-
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
device = acpi_fetch_acpi_dev(element->reference.handle);
if (!device)
return -EINVAL;
- nargs = 0;
element++;
- /*
- * Find the referred data extension node under the
- * referred device node.
- */
- for (ref_fwnode = acpi_fwnode_handle(device);
- element < end && element->type == ACPI_TYPE_STRING;
- element++) {
- ref_fwnode = acpi_fwnode_get_named_child_node(
- ref_fwnode, element->string.pointer);
- if (!ref_fwnode)
- return -EINVAL;
- }
-
- /*
- * Assume the following integer elements are all args.
- * Stop counting on the first reference or end of the
- * package arguments. In case of neither reference,
- * nor integer, return an error, we can't parse it.
- */
- for (i = 0; element + i < end && i < num_args; i++) {
- int type = element[i].type;
-
- if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else
- return -EINVAL;
- }
-
- if (nargs > NR_FWNODE_REFERENCE_ARGS)
- return -EINVAL;
-
- if (idx == index) {
- args->fwnode = ref_fwnode;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ ret = acpi_get_ref_args(idx == index ? args : NULL,
+ acpi_fwnode_handle(device),
+ &element, end, num_args);
+ if (ret < 0)
+ return ret;
+ if (idx == index)
return 0;
- }
- element += nargs;
- } else if (element->type == ACPI_TYPE_INTEGER) {
+ break;
+ case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
element++;
- } else {
+ break;
+ default:
return -EINVAL;
}
@@ -852,67 +1025,36 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
return ret;
}
-static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
- size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U8_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u16(const union acpi_object *items,
- u16 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U16_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u32(const union acpi_object *items,
- u32 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U32_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u64(const union acpi_object *items,
- u64 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
+#define acpi_copy_property_array_uint(items, val, nval) \
+ ({ \
+ typeof(items) __items = items; \
+ typeof(val) __val = val; \
+ typeof(nval) __nval = nval; \
+ size_t i; \
+ int ret = 0; \
+ \
+ for (i = 0; i < __nval; i++) { \
+ if (__items->type == ACPI_TYPE_BUFFER) { \
+ __val[i] = __items->buffer.pointer[i]; \
+ continue; \
+ } \
+ if (__items[i].type != ACPI_TYPE_INTEGER) { \
+ ret = -EPROTO; \
+ break; \
+ } \
+ if (__items[i].integer.value > _Generic(__val, \
+ u8 *: U8_MAX, \
+ u16 *: U16_MAX, \
+ u32 *: U32_MAX, \
+ u64 *: U64_MAX)) { \
+ ret = -EOVERFLOW; \
+ break; \
+ } \
+ \
+ __val[i] = __items[i].integer.value; \
+ } \
+ ret; \
+ })
static int acpi_copy_property_array_string(const union acpi_object *items,
char **val, size_t nval)
@@ -954,31 +1096,54 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
+ if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER,
+ &obj);
if (ret)
return ret;
- if (!val)
+ if (!val) {
+ if (obj->type == ACPI_TYPE_BUFFER)
+ return obj->buffer.length;
+
return obj->package.count;
+ }
- if (proptype != DEV_PROP_STRING && nval > obj->package.count)
- return -EOVERFLOW;
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+ case DEV_PROP_U8 ... DEV_PROP_U64:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+ break;
+ }
+ fallthrough;
+ default:
+ if (nval > obj->package.count)
+ return -EOVERFLOW;
+ break;
+ }
if (nval == 0)
return -EINVAL;
- items = obj->package.elements;
+ if (obj->type != ACPI_TYPE_BUFFER)
+ items = obj->package.elements;
+ else
+ items = obj;
switch (proptype) {
case DEV_PROP_U8:
- ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u8 *)val, nval);
break;
case DEV_PROP_U16:
- ret = acpi_copy_property_array_u16(items, (u16 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u16 *)val, nval);
break;
case DEV_PROP_U32:
- ret = acpi_copy_property_array_u32(items, (u32 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u32 *)val, nval);
break;
case DEV_PROP_U64:
- ret = acpi_copy_property_array_u64(items, (u64 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
ret = acpi_copy_property_array_string(
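
The property.c rework above folds four near-identical copy helpers into one acpi_copy_property_array_uint() macro by letting _Generic pick the overflow bound from the destination pointer type at compile time. A standalone sketch of just that selection trick (not the kernel macro itself):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pick the saturation bound from the destination pointer type, the way
 * the patched acpi_copy_property_array_uint() does. */
#define uint_max_for(ptr)		\
	_Generic((ptr),			\
		 uint8_t *:  UINT8_MAX,	\
		 uint16_t *: UINT16_MAX,\
		 uint32_t *: UINT32_MAX,\
		 uint64_t *: UINT64_MAX)

int main(void)
{
	uint8_t v8;
	uint64_t v64;
	uint8_t *p8 = &v8;
	uint64_t *p64 = &v64;

	printf("u8 bound:  %" PRIu64 "\n", (uint64_t)uint_max_for(p8));
	printf("u64 bound: %" PRIu64 "\n", (uint64_t)uint_max_for(p64));
	return 0;
}

The same macro also accepts an ACPI_TYPE_BUFFER source, which is what lets integer-array properties be read straight out of a buffer object.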
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b100e6ca9bb4..42cec8120f18 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1722,6 +1722,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
+ {"CLSA0101", },
/*
* Some ACPI devs contain SerialBus resources even though they are not
* attached to a serial bus at all.
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3a9773a09e19..5a7b8065e77f 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -291,6 +291,44 @@ int acpi_get_local_address(acpi_handle handle, u32 *addr)
}
EXPORT_SYMBOL(acpi_get_local_address);
+#define ACPI_MAX_SUB_BUF_SIZE 9
+
+const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ const char *sub;
+ size_t len;
+
+ status = acpi_evaluate_object(handle, METHOD_NAME__SUB, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Reading ACPI _SUB failed: %#x\n", status);
+ return ERR_PTR(-ENODATA);
+ }
+
+ obj = buffer.pointer;
+ if (obj->type == ACPI_TYPE_STRING) {
+ len = strlen(obj->string.pointer);
+ if (len < ACPI_MAX_SUB_BUF_SIZE && len > 0) {
+ sub = kstrdup(obj->string.pointer, GFP_KERNEL);
+ if (!sub)
+ sub = ERR_PTR(-ENOMEM);
+ } else {
+ acpi_handle_err(handle, "ACPI _SUB Length %zu is Invalid\n", len);
+ sub = ERR_PTR(-ENODATA);
+ }
+ } else {
+ acpi_handle_warn(handle, "Warning ACPI _SUB did not return a string\n");
+ sub = ERR_PTR(-ENODATA);
+ }
+
+ acpi_os_free(buffer.pointer);
+
+ return sub;
+}
+EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
+
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
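
Usage note for the new acpi_get_subsystem_id(): on success the caller owns a kstrdup() copy and must kfree() it; failures come back as ERR_PTR(-ENODATA) or ERR_PTR(-ENOMEM). The length rule itself is simple; a userspace model of the validation, with the bound mirroring ACPI_MAX_SUB_BUF_SIZE above:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* ACPI_MAX_SUB_BUF_SIZE is 9: up to 8 characters plus the NUL. */
#define ACPI_MAX_SUB_BUF_SIZE	9

static bool sub_id_valid(const char *s)
{
	size_t len = strlen(s);

	return len > 0 && len < ACPI_MAX_SUB_BUF_SIZE;
}

int main(void)
{
	printf("%d\n", sub_id_valid("CL1234"));    /* 1 */
	printf("%d\n", sub_id_valid(""));          /* 0: empty */
	printf("%d\n", sub_id_valid("NINECHARS")); /* 0: 9 chars, too long */
	return 0;
}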
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 647f11cf165d..6132092dab2a 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -88,7 +88,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
return -ENODEV;
}
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (!fwnode) {
/*
* PCI devices aren't necessarily described by ACPI. Create a
@@ -101,7 +101,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
}
set_primary_fwnode(&pdev->dev, fwnode);
}
- viommu->fwnode = pdev->dev.fwnode;
+ viommu->fwnode = dev_fwnode(&pdev->dev);
pci_dev_put(pdev);
return 0;
}
@@ -314,7 +314,7 @@ static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
return -ENODEV;
/* We're not translating ourself */
- if (viommu->fwnode == dev->fwnode)
+ if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
ops = iommu_ops_from_fwnode(viommu->fwnode);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 32b0e0b930c1..110a535648d2 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -209,6 +209,7 @@ static int amba_match(struct device *dev, struct device_driver *drv)
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
+ mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
int ret = amba_read_periphid(pcdev);
@@ -218,11 +219,14 @@ static int amba_match(struct device *dev, struct device_driver *drv)
* permanent failure in reading pid and cid, simply map it to
* -EPROBE_DEFER.
*/
- if (ret)
+ if (ret) {
+ mutex_unlock(&pcdev->periphid_lock);
return -EPROBE_DEFER;
+ }
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
}
+ mutex_unlock(&pcdev->periphid_lock);
/* When driver_override is set, only bind to the matching driver */
if (pcdev->driver_override)
@@ -532,6 +536,7 @@ static void amba_device_release(struct device *dev)
if (d->res.parent)
release_resource(&d->res);
+ mutex_destroy(&d->periphid_lock);
kfree(d);
}
@@ -584,6 +589,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->res.name = dev_name(&dev->dev);
+ mutex_init(&dev->periphid_lock);
}
/**
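
The amba_match() hunks above serialize the lazy peripheral-ID read behind the new per-device periphid_lock, so concurrent matchers cannot both probe the hardware and emit the deferred uevent. A rough userspace model of the pattern, with a pthread mutex standing in for the kernel mutex and a made-up ID value:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t periphid_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int periphid;

static unsigned int read_periphid(void)
{
	return 0x00041011;	/* pretend hardware read (invented value) */
}

/* Only the first matcher performs the read; later ones see the cache. */
static unsigned int get_periphid(void)
{
	unsigned int id;

	pthread_mutex_lock(&periphid_lock);
	if (!periphid)
		periphid = read_periphid();
	id = periphid;
	pthread_mutex_unlock(&periphid_lock);
	return id;
}

int main(void)
{
	printf("periphid = 0x%08x\n", get_periphid());
	return 0;
}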
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c964d7c8c384..6428f6be69e3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1385,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+ * Cleanup the failed reference here as the target
+ * could now be dead and have already released its
+ * references by now. Calling on the new reference
+ * with strong=0 and a tmp_refs will not decrement
+ * the node. The new_ref gets kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5649a0371a1f..9b1778c00610 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
if (mm) {
mmap_read_lock(mm);
- vma = alloc->vma;
+ vma = vma_lookup(mm, alloc->vma_addr);
}
if (!vma && need_mm) {
@@ -313,16 +313,21 @@ err_no_vma:
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
- if (vma)
- alloc->vma_vm_mm = vma->vm_mm;
+ unsigned long vm_start = 0;
+
/*
- * If we see alloc->vma is not NULL, buffer data structures set up
- * completely. Look at smp_rmb side binder_alloc_get_vma.
- * We also want to guarantee new alloc->vma_vm_mm is always visible
- * if alloc->vma is set.
+	 * Allow clearing the vma while holding just the read lock, so that
+	 * munmap() can downgrade its write lock before freeing and closing
+	 * the file via binder_alloc_vma_close().
*/
- smp_wmb();
- alloc->vma = vma;
+ if (vma) {
+ vm_start = vma->vm_start;
+ mmap_assert_write_locked(alloc->vma_vm_mm);
+ } else {
+ mmap_assert_locked(alloc->vma_vm_mm);
+ }
+
+ alloc->vma_addr = vm_start;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
@@ -330,11 +335,9 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
{
struct vm_area_struct *vma = NULL;
- if (alloc->vma) {
- /* Look at description in binder_alloc_set_vma */
- smp_rmb();
- vma = alloc->vma;
- }
+ if (alloc->vma_addr)
+ vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+
return vma;
}
@@ -398,12 +401,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
+ mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
+ mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
+ mmap_read_unlock(alloc->vma_vm_mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -788,7 +794,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
binder_alloc_set_vma(alloc, vma);
- mmgrab(alloc->vma_vm_mm);
return 0;
@@ -817,7 +822,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma);
+ BUG_ON(alloc->vma_addr &&
+ vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -924,17 +930,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
+
+ mmap_read_lock(alloc->vma_vm_mm);
+ if (binder_alloc_get_vma(alloc) == NULL) {
+ mmap_read_unlock(alloc->vma_vm_mm);
+ goto uninitialized;
}
+
+ mmap_read_unlock(alloc->vma_vm_mm);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+
+uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -1075,6 +1089,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
+ alloc->vma_vm_mm = current->mm;
+ mmgrab(alloc->vma_vm_mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
@@ -1084,7 +1100,7 @@ int binder_alloc_shrinker_init(void)
int ret = list_lru_init(&binder_alloc_lru);
if (ret == 0) {
- ret = register_shrinker(&binder_shrinker);
+ ret = register_shrinker(&binder_shrinker, "android-binder");
if (ret)
list_lru_destroy(&binder_alloc_lru);
}
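
Across this binder_alloc series the cached alloc->vma pointer becomes a plain address that is re-resolved via vma_lookup() with the mmap lock held. What makes that sound is the lookup contract: vma_lookup() returns the mapping that actually contains the address, or NULL, never the next mapping above a hole (which the older find_vma() would return). A userspace sketch of that semantic, with a linear scan standing in for the kernel's tree walk:

#include <stddef.h>
#include <stdio.h>

struct vma { unsigned long start, end; };	/* covers [start, end) */

/* Return the mapping containing addr, or NULL if addr is unmapped. */
static const struct vma *vma_lookup(const struct vma *v, size_t n,
				    unsigned long addr)
{
	for (size_t i = 0; i < n; i++)
		if (addr >= v[i].start && addr < v[i].end)
			return &v[i];
	return NULL;
}

int main(void)
{
	const struct vma maps[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };

	printf("0x2000 -> %p\n", (const void *)vma_lookup(maps, 2, 0x2000));
	printf("0x4000 -> %p\n", (const void *)vma_lookup(maps, 2, 0x4000)); /* NULL: hole */
	return 0;
}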
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 7dea57a84c79..1e4fd37af5e0 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct vm_area_struct *vma;
+ unsigned long vma_addr;
struct mm_struct *vma_vm_mm;
void __user *buffer;
struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c2b323bc3b3a..43a881073a42 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->vma_addr)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ef4508d72c02..7c128c89b454 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9b999c0e8c37..29e2f55c6faa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1060,6 +1060,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
+ dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 03b6ae37a578..6559b606736d 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -683,7 +683,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
struct bcom_task *dmatsk;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 81ce81a75fc6..681cb3786794 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3752,6 +3752,7 @@ static void __exit idt77252_exit(void)
card = idt77252_chain;
dev = card->atmdev;
idt77252_chain = card->next;
+ del_timer_sync(&card->tst_timer);
if (dev->phy->stop)
dev->phy->stop(dev);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 0424b59b695e..46cbe4471e78 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -724,7 +724,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
*/
if (cpumask_subset(cpu_coregroup_mask(cpu),
&cpu_topology[cpu].cluster_sibling))
- return get_cpu_mask(cpu);
+ return topology_sibling_cpumask(cpu);
return &cpu_topology[cpu].cluster_sibling;
}
@@ -735,7 +735,7 @@ void update_siblings_masks(unsigned int cpuid)
int cpu, ret;
ret = detect_cache_attributes(cpuid);
- if (ret)
+ if (ret && ret != -ENOENT)
pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 753e7cca0f40..5fb4bc51dd8b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1625,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
}
early_param("fw_devlink", fw_devlink_setup);
-static bool fw_devlink_strict = true;
+static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
return strtobool(arg, &fw_devlink_strict);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 70f79fc71539..ec69b43f926a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -274,12 +274,42 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Return:
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
+ *
+ * Drivers or subsystems can opt-in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
+ return -ENODEV;
+ }
+
+ if (!driver_deferred_probe_timeout && initcalls_done) {
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
+ return -ETIMEDOUT;
+ }
+
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
+
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
+ driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
@@ -881,6 +911,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -1120,6 +1155,11 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
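
driver_deferred_probe_check_state(), reinstated above, gives resource providers a three-way answer instead of a bare -EPROBE_DEFER; the genpd hunk later in this patch is one in-tree caller. A userspace model of the decision table (errno values hard-coded for illustration; 517 is the kernel-internal EPROBE_DEFER):

#include <stdbool.h>
#include <stdio.h>

#define ENODEV		19
#define ETIMEDOUT	110
#define EPROBE_DEFER	517

/* Once initcalls are done, keep deferring only while modules could
 * still provide the dependency and the probe timeout is pending. */
static int check_state(bool modules_enabled, bool initcalls_done,
		       int deferred_probe_timeout)
{
	if (!modules_enabled && initcalls_done)
		return -ENODEV;		/* no driver can ever appear */
	if (!deferred_probe_timeout && initcalls_done)
		return -ETIMEDOUT;	/* stopped waiting for the dependency */
	return -EPROBE_DEFER;		/* still worth retrying */
}

int main(void)
{
	printf("%d\n", check_state(true, true, 10));	/* -517 */
	printf("%d\n", check_state(true, true, 0));	/* -110 */
	printf("%d\n", check_state(false, true, 0));	/* -19 */
	return 0;
}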
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 15a75afe6b84..676b6275d5b5 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override,
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
+ /*
+ * Compute the real length of the string in case userspace sends us a
+ * bunch of \0 characters like python likes to do.
+ */
+ len = strlen(s);
+
if (!len) {
/* Empty string passed - clear override */
device_lock(dev);
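
The driver_set_override() fix above matters because sysfs passes the store callback the raw byte count of the write, and writers that append NUL padding inflate it. A standalone illustration of the count-versus-strlen mismatch (the override name is invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* What a writer that appends NUL padding hands to the store op. */
	const char buf[] = { 'v', 'f', 'i', 'o', '-', 'p', 'c', 'i',
			     '\0', '\0', '\0' };
	size_t count = sizeof(buf);	/* sysfs-reported length: 11 */
	size_t len = strlen(buf);	/* intended override length: 8 */

	printf("count=%zu len=%zu override=\"%s\"\n", count, len, buf);
	return 0;
}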
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index 77bad32c481a..5b66b3d1fa16 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -93,10 +93,9 @@ static void fw_dev_release(struct device *dev)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- if (fw_sysfs->fw_upload_priv) {
- free_fw_priv(fw_sysfs->fw_priv);
- kfree(fw_sysfs->fw_upload_priv);
- }
+ if (fw_sysfs->fw_upload_priv)
+ fw_upload_free(fw_sysfs);
+
kfree(fw_sysfs);
}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 5d8ff1675c79..df1d5add698f 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -106,12 +106,17 @@ extern struct device_attribute dev_attr_cancel;
extern struct device_attribute dev_attr_remaining_size;
int fw_upload_start(struct fw_sysfs *fw_sysfs);
+void fw_upload_free(struct fw_sysfs *fw_sysfs);
umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
#else
static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
{
return 0;
}
+
+static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+}
#endif
#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 87044d52322a..a0af8f5f13d8 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -264,6 +264,15 @@ int fw_upload_start(struct fw_sysfs *fw_sysfs)
return 0;
}
+void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_upload_priv->fw_upload);
+ kfree(fw_upload_priv);
+}
+
/**
* firmware_upload_register() - register for the firmware upload sysfs API
* @module: kernel module of this device
@@ -377,6 +386,7 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
{
struct fw_sysfs *fw_sysfs = fw_upload->priv;
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+ struct module *module = fw_upload_priv->module;
mutex_lock(&fw_upload_priv->lock);
if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
@@ -392,6 +402,6 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
unregister:
device_unregister(&fw_sysfs->dev);
- module_put(fw_upload_priv->module);
+ module_put(module);
}
EXPORT_SYMBOL_GPL(firmware_upload_unregister);
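
The firmware_upload_unregister() change above is a save-before-teardown fix: device_unregister() ends up releasing fw_upload_priv through fw_dev_release(), so the module pointer has to be copied out first. A minimal userspace model of the pattern (free() standing in for the device release path; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct fw_upload_priv { const char *module; };

/* device_unregister() analogue: tears down and frees the private data. */
static void unregister_device(struct fw_upload_priv *p)
{
	free(p);
}

int main(void)
{
	struct fw_upload_priv *priv = malloc(sizeof(*priv));

	if (!priv)
		return 1;
	priv->module = "example_module";

	const char *module = priv->module;	/* copy out before teardown */

	unregister_device(priv);
	printf("module_put(%s)\n", module);	/* safe: no use-after-free */
	return 0;
}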
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5a2e0232862e..55a10e6d4e2a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2733,7 +2733,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return -ENODEV;
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 719323bc6c7f..37ab23a9d034 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
const struct regmap_config *config)
{
size_t max_size = spi_max_transfer_size(spi);
+ size_t max_msg_size, reg_reserve_size;
struct regmap_bus *bus;
if (max_size != SIZE_MAX) {
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
if (!bus)
return ERR_PTR(-ENOMEM);
+ max_msg_size = spi_max_message_size(spi);
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ + config->pad_bits / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
bus->free_on_exit = true;
bus->max_raw_read = max_size;
bus->max_raw_write = max_size;
+
return bus;
}
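
A standalone sketch of the reservation arithmetic above, with illustrative values; the limits normally come from spi_max_transfer_size() and spi_max_message_size(), and the message has to carry the register address and padding alongside the payload:

#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	size_t max_size = 64;		/* controller transfer limit */
	size_t max_msg_size = 64;	/* controller message limit  */
	size_t reg_bits = 16, pad_bits = 8;
	size_t reserve = reg_bits / BITS_PER_BYTE + pad_bits / BITS_PER_BYTE;

	/* Register address and padding travel in the same message as the
	 * payload, so shrink the raw limit when the sum would not fit. */
	if (max_size + reserve > max_msg_size)
		max_size -= reserve;

	printf("max_raw_read/write = %zu\n", max_size);	/* prints 61 */
	return 0;
}
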
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0d8ec2fe5740..f9e39301c4af 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1297,7 +1297,7 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
__func__, osd_req, obj_req, obj_req->ex.oe_objno,
obj_req->ex.oe_off, obj_req->ex.oe_len);
- ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
+ ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}
/*
@@ -2081,7 +2081,7 @@ static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
if (ret)
return ret;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
return 0;
}
@@ -4768,7 +4768,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
if (ret)
goto out_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0)
ceph_copy_from_page_vector(pages, buf, 0, ret);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d7d72e8f6e55..30255fcaf181 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -101,6 +101,14 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
+static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+
+ return vq;
+}
+
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
@@ -416,7 +424,7 @@ static void virtio_queue_rqs(struct request **rqlist)
struct request *requeue_list = NULL;
rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
bool kick;
if (!virtblk_prep_rq_batch(req)) {
@@ -837,7 +845,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
- struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
@@ -862,22 +870,10 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return found;
}
-static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
-{
- struct virtio_blk *vblk = data;
- struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
-
- WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
- hctx->driver_data = vq;
- return 0;
-}
-
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
- .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
.poll = virtblk_poll,
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index bda5c815e441..a28473470e66 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -226,6 +226,9 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ /* Connect-time cached feature_persistent parameter value */
+ unsigned int feature_gnt_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 97de13b14175..c0227dfa4688 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -157,6 +157,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
return 0;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
+
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
@@ -472,12 +477,6 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -520,8 +519,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (bdev_max_secure_erase_sectors(bdev))
vbd->discard_secure = true;
- vbd->feature_gnt_persistent = feature_persistent;
-
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -910,7 +907,7 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- be->blkif->vbd.feature_gnt_persistent);
+ be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1087,10 +1084,11 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- if (blkif->vbd.feature_gnt_persistent)
- blkif->vbd.feature_gnt_persistent =
- xenbus_read_unsigned(dev->otherend,
- "feature-persistent", 0);
+
+ blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
+ blkif->vbd.feature_gnt_persistent =
+ blkif->vbd.feature_gnt_persistent_parm &&
+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
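
Because feature_persistent is a writable module parameter, its value could change between the backend advertising the feature and the frontend replying; latching it once per connection keeps both steps consistent. A standalone sketch of the resulting negotiation, with xenbus_read_unsigned() stubbed out by frontend_flag:

#include <stdbool.h>
#include <stdio.h>

static bool negotiate(bool parm_at_connect, unsigned int frontend_flag)
{
	/* The latched parameter is what was advertised to the frontend,
	 * and the feature is only enabled if both ends agree. */
	return parm_at_connect && frontend_flag;
}

int main(void)
{
	printf("%d\n", negotiate(true, 1));	/* 1: both sides agree  */
	printf("%d\n", negotiate(true, 0));	/* 0: frontend declined */
	printf("%d\n", negotiate(false, 1));	/* 0: disabled by parm  */
	return 0;
}
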
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index dc48298225a6..35b9bcad9db9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -213,6 +213,9 @@ struct blkfront_info
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ /* Connect-time cached feature_persistent parameter */
+ unsigned int feature_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
@@ -1756,6 +1759,12 @@ abort_transaction:
return err;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1847,8 +1856,9 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- info->feature_persistent);
+ info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1916,12 +1926,6 @@ static int negotiate_mq(struct blkfront_info *info)
return 0;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -1988,8 +1992,6 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
- info->feature_persistent = feature_persistent;
-
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2283,7 +2285,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (info->feature_persistent)
+ if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 052aa3f65514..0916de952e09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -63,12 +63,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
bool zcomp_available_algorithm(const char *comp)
{
- int i;
-
- i = sysfs_match_string(backends, comp);
- if (i >= 0)
- return true;
-
/*
* Crypto does not ignore a trailing new line symbol,
* so make sure you don't supply a string containing
@@ -217,6 +211,11 @@ struct zcomp *zcomp_create(const char *compress)
struct zcomp *comp;
int error;
+ /*
+ * Crypto API will execute /sbin/modprobe if the compression module
+ * is not loaded yet. We must do it here, otherwise we would end up
+ * calling /sbin/modprobe under the CPU hot-plug lock.
+ */
if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
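
The algorithm lookup can end up in request_module(), i.e. a sleeping exec of /sbin/modprobe, which must not happen once cpuhp_state_add_instance() holds the CPU hotplug lock. A condensed sketch of the ordering this enforces in zcomp_create(), with error unwinding trimmed:

static struct zcomp *zcomp_create_sketch(const char *name)
{
	struct zcomp *comp;
	int ret;

	/* May invoke request_module() -> /sbin/modprobe and sleep. */
	if (!zcomp_available_algorithm(name))
		return ERR_PTR(-EINVAL);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);
	comp->name = name;

	/* Runs under the CPU hotplug lock; must not reach modprobe. */
	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret) {
		kfree(comp);
		return ERR_PTR(ret);
	}
	return comp;
}
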
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4abeb261b833..226ea76cc819 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,7 +52,9 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
+#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops;
+#endif
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -1144,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int version = 2;
+ int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu\n",
+ "version: %d\n%8llu %8llu\n",
version,
+ (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1349,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret = 0;
unsigned long alloced_pages;
- unsigned long handle = 0;
+ unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
@@ -1367,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);
+compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1375,21 +1379,40 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
+ zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
-
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
-
- if (unlikely(!handle)) {
+ /*
+ * handle allocation has 2 paths:
+ * a) fast path is executed with preemption disabled (for
+ * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+ * since we can't sleep;
+ * b) slow path enables preemption and attempts to allocate
+ * the page with __GFP_DIRECT_RECLAIM bit set. we have to
+ * put per-cpu compression stream and, thus, to re-do
+ * the compression once handle is allocated.
+ *
+ * if we have a valid (non-error) handle here then we are coming
+ * from the slow path and the handle has already been allocated.
+ */
+ if (IS_ERR((void *)handle))
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
- return -ENOMEM;
+ atomic64_inc(&zram->stats.writestall);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (!IS_ERR((void *)handle))
+ goto compress_again;
+ return PTR_ERR((void *)handle);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1946,6 +1969,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
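
The restored logic gives handle allocation two paths: the fast attempt runs with the per-CPU compression stream held (preemption disabled), so it cannot enter direct reclaim; on failure the stream is dropped, writestall is counted, and a sleepable GFP_NOIO allocation is tried. Dropping the stream invalidates the compressed buffer, so a successful slow-path allocation has to jump back and recompress. A condensed view of that control flow from __zram_bvec_write() above:

	unsigned long handle = -ENOMEM;	/* error-encoded, not NULL */

compress_again:
	zstrm = zcomp_stream_get(zram->comp);	/* disables preemption */
	ret = zcomp_compress(zstrm, src, &comp_len);

	if (IS_ERR((void *)handle))		/* first pass only */
		handle = zs_malloc(zram->mem_pool, comp_len,
				   __GFP_KSWAPD_RECLAIM | __GFP_NOWARN |
				   __GFP_HIGHMEM | __GFP_MOVABLE);
	if (IS_ERR((void *)handle)) {
		zcomp_stream_put(zram->comp);	/* now allowed to sleep */
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
		if (!IS_ERR((void *)handle))
			goto compress_again;	/* stream was lost: redo */
	}
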
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 158c91e54850..80c3b43b4828 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -81,6 +81,7 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index f3aef77a6a4a..df0fbfee7b78 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -430,12 +430,25 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct mhi_event_ctxt *er_ctxt =
- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_event_ctxt *er_ctxt;
struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+ dma_addr_t ptr;
void *dev_rp;
+ /*
+ * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
+ * and by that time mhi_ctxt would've been freed. So check for the existence of
+ * mhi_ctxt before handling the IRQs.
+ */
+ if (!mhi_cntrl->mhi_ctxt) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev,
+ "mhi_ctxt has been freed\n");
+ return IRQ_HANDLED;
+ }
+
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ ptr = le64_to_cpu(er_ctxt->rp);
+
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fe7e2105e766..bf6716ff863b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -20,7 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
-#include <linux/intel-iommu.h>
+#include <linux/iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
@@ -573,18 +573,15 @@ static void intel_gtt_cleanup(void)
*/
static inline int needs_ilk_vtd_wa(void)
{
-#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
+ /*
+ * Query iommu subsystem to see if we need the workaround. Presumably
+ * that was loaded first.
*/
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
+ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ device_iommu_mapped(&intel_private.pcidev->dev));
}
static bool intel_gtt_can_wc(void)
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 8da1d7917bdc..429e956f34e1 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -23,7 +23,7 @@ static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
buf = (unsigned long *)data;
for (i = 0; i < len; i++)
- powernv_get_random_long(buf++);
+ pnv_get_random_long(buf++);
return len * sizeof(unsigned long);
}
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 488808dc17a2..795853dfc46b 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -252,5 +252,5 @@ static void __exit trng_exit(void)
trng_debug_exit();
}
-module_cpu_feature_match(MSA, trng_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init);
module_exit(trng_exit);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 84ca98ed1dad..32a932a065a6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 4a5516406c22..927088b2c3d3 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,6 +74,18 @@ config TCG_TIS_SPI_CR50
If you have a H1 secure module running Cr50 firmware on SPI bus,
say Yes and it will be accessible from within Linux.
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
depends on ARCH_SYNQUACER || COMPILE_TEST
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 66d39ea6bd10..0222b1ddb310 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -29,6 +29,7 @@ tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2163c6ee0d36..24ee4e1cc452 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -55,6 +55,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_TAG_RQU_COMMAND 193
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index f7dc986fa4a0..cf64c7385105 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -709,7 +709,12 @@ int tpm1_auto_startup(struct tpm_chip *chip)
if (rc)
goto out;
rc = tpm1_do_selftest(chip);
- if (rc) {
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
dev_err(&chip->dev, "TPM self test failed\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c1eb5d223839..65d03867e114 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -752,6 +752,12 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
out:
/*
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index dc56b976d816..757623bacfd5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -289,6 +289,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
+ int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -328,6 +329,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
out:
tpm_tis_ready(chip);
return size;
@@ -443,6 +451,12 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc < 0)
return rc;
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ return rc;
+ }
+
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6c203f36b8a1..66a5a13cd1df 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -121,6 +121,8 @@ struct tpm_tis_phy_ops {
u8 *result, enum tpm_tis_io_mode mode);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
};
static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
@@ -188,6 +190,14 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return rc;
}
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
+}
+
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 000000000000..ba0911b1d1ff
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result;
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ /* write register and data in one go */
+ phy->io_buf[0] = reg;
+ memcpy(phy->io_buf + sizeof(reg), value, len);
+
+ msg.len = sizeof(reg) + len;
+ msg.buf = phy->io_buf;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM has sent an I2C NACK),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ const struct i2c_device_id *id)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static int tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+ return 0;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
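
The "reflect" in tpm_tis_i2c_verify_crc() compensates for the TPM reporting its CRC-16/CCITT with swapped byte order relative to crc_ccitt()'s result. A standalone illustration of the comparison value; crc16() below mirrors the kernel's crc_ccitt() (reflected polynomial 0x8408 with a caller-supplied seed), and the command bytes are arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	const uint8_t cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c };
	/* crc_tpm would be read back from TPM_DATA_CSUM (0x44). */
	uint16_t crc_host = swab16(crc16(0, cmd, sizeof(cmd)));

	printf("expected TPM_DATA_CSUM: 0x%04x\n", crc_host);
	return 0;
}
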
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 73518009a0f2..876b37b8683c 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -203,7 +203,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
if (ret)
- return ret;
+ return 0;
return val;
}
@@ -220,7 +220,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
if (ret)
- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
clk_hw_get_name(hw), ret);
return ret;
@@ -288,7 +288,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
&min_rate);
if (ret) {
- dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
id, ret);
return ERR_PTR(ret);
}
@@ -344,8 +344,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct rpi_firmware_get_clocks_response *clks;
int ret;
+ /*
+ * The firmware doesn't guarantee that the last element of
+ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
+ * zero element as sentinel.
+ */
clks = devm_kcalloc(rpi->dev,
- RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
@@ -360,7 +365,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct raspberrypi_clk_variant *variant;
if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
- dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
+ dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+ clks->id, RPI_FIRMWARE_NUM_CLK_ID);
return -EINVAL;
}
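
Allocating RPI_FIRMWARE_NUM_CLK_ID + 1 zeroed entries guarantees a terminator for the walk over the firmware response even when every slot gets filled. A standalone sketch of the sentinel pattern; structure layout and counts are simplified stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct clk_resp {
	unsigned int id;
	unsigned int rate;
};

int main(void)
{
	enum { NUM_CLK_ID = 4 };	/* stand-in for the firmware count */
	struct clk_resp *clks = calloc(NUM_CLK_ID + 1, sizeof(*clks));
	unsigned int i;

	if (!clks)
		return 1;
	/* Worst case: the firmware fills every requested slot. */
	for (i = 0; i < NUM_CLK_ID; i++)
		clks[i] = (struct clk_resp){ .id = i + 1, .rate = 1000 * (i + 1) };

	/* The extra zeroed element keeps this loop bounded. */
	for (struct clk_resp *c = clks; c->id; c++)
		printf("clk %u -> %u Hz\n", c->id, c->rate);

	free(clks);
	return 0;
}
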
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7fc191c15507..bd0b35cac83e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -840,10 +840,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ef2a445c63a3..373e9438b57a 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -135,6 +135,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
continue;
if (!strncmp(n, tmp, strlen(tmp))) {
+ of_node_get(np);
found = true;
break;
}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 593d5a957b69..969a552da8d2 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -7,6 +7,9 @@
* either be read from the "time" and "timeh" CSRs, and can use the SBI to
* setup events, or directly accessed using MMIO registers.
*/
+
+#define pr_fmt(fmt) "riscv-timer: " fmt
+
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -20,14 +23,28 @@
#include <linux/of_irq.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
+#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/timex.h>
+static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
+ u64 next_tval = get_cycles64() + delta;
+
csr_set(CSR_IE, IE_TIE);
- sbi_set_timer(get_cycles64() + delta);
+ if (static_branch_likely(&riscv_sstc_available)) {
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
+ csr_write(CSR_STIMECMPH, next_tval >> 32);
+#else
+ csr_write(CSR_STIMECMP, next_tval);
+#endif
+ } else
+ sbi_set_timer(next_tval);
+
return 0;
}
@@ -101,20 +118,21 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpuid, hartid, error;
+ int cpuid, error;
+ unsigned long hartid;
struct device_node *child;
struct irq_domain *domain;
- hartid = riscv_of_processor_hartid(n);
- if (hartid < 0) {
- pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
+ error = riscv_of_processor_hartid(n, &hartid);
+ if (error < 0) {
+ pr_warn("Not valid hartid for node [%pOF] error = [%lu]\n",
n, hartid);
- return hartid;
+ return error;
}
cpuid = riscv_hartid_to_cpuid(hartid);
if (cpuid < 0) {
- pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
+ pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
return cpuid;
}
@@ -140,7 +158,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return -ENODEV;
}
- pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
+ pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
if (error) {
@@ -165,6 +183,12 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (error)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
+
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+ static_branch_enable(&riscv_sstc_available);
+ }
+
return error;
}
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8fcaba541539..d69d13a26414 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -29,9 +29,9 @@ struct private_data {
cpumask_var_t cpus;
struct device *cpu_dev;
- struct opp_table *opp_table;
struct cpufreq_frequency_table *freq_table;
bool have_static_opps;
+ int opp_token;
};
static LIST_HEAD(priv_list);
@@ -193,7 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
struct private_data *priv;
struct device *cpu_dev;
bool fallback = false;
- const char *reg_name;
+ const char *reg_name[] = { NULL, NULL };
int ret;
/* Check if this CPU is already covered by some other policy */
@@ -218,12 +218,11 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
* OPP layer will be taking care of regulators now, but it needs to know
* the name of the regulator first.
*/
- reg_name = find_supply_name(cpu_dev);
- if (reg_name) {
- priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
- 1);
- if (IS_ERR(priv->opp_table)) {
- ret = PTR_ERR(priv->opp_table);
+ reg_name[0] = find_supply_name(cpu_dev);
+ if (reg_name[0]) {
+ priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
+ if (priv->opp_token < 0) {
+ ret = priv->opp_token;
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to set regulators: %d\n",
ret);
@@ -295,7 +294,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
out:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
free_cpumask_var(priv->cpus);
return ret;
@@ -309,7 +308,7 @@ static void dt_cpufreq_release(void)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask_var(priv->cpus);
list_del(&priv->node);
}
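
The conversion above follows the OPP core moving from returning struct opp_table pointers to plain integer tokens: failures are negative errnos on the token itself, regulator name lists are NULL-terminated, and the put side takes the token back. A minimal sketch of the pattern, with placeholder device and regulator names:

static int example_opp_regulators(struct device *cpu_dev, int *token_out)
{
	const char *reg_name[] = { "cpu", NULL };	/* NULL-terminated */
	int token = dev_pm_opp_set_regulators(cpu_dev, reg_name);

	if (token < 0)
		return token;	/* negative errno, not an ERR_PTR */
	*token_out = token;
	return 0;
}

static void example_opp_cleanup(int token)
{
	dev_pm_opp_put_regulators(token);
}
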
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 954eef26685f..69b3d61852ac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
target_freq = clamp_val(target_freq, policy->min, policy->max);
- if (!cpufreq_driver->target_index)
+ if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
@@ -1348,15 +1348,15 @@ static int cpufreq_online(unsigned int cpu)
}
if (!new_policy && cpufreq_driver->online) {
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+
ret = cpufreq_driver->online(policy);
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_exit_policy;
}
-
- /* Recover policy->cpus using related_cpus */
- cpumask_copy(policy->cpus, policy->related_cpus);
} else {
cpumask_copy(policy->cpus, cpumask_of(cpu));
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 3fe9125156b4..76e553af2071 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -31,8 +31,8 @@
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
-static struct opp_table *cpufreq_opp_table;
static struct device *cpu_dev;
+static int cpufreq_opp_token;
enum IMX7ULP_CPUFREQ_CLKS {
ARM,
@@ -153,9 +153,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
- cpufreq_opp_table = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
- if (IS_ERR(cpufreq_opp_table)) {
- ret = PTR_ERR(cpufreq_opp_table);
+ cpufreq_opp_token = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
+ if (cpufreq_opp_token < 0) {
+ ret = cpufreq_opp_token;
dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
return ret;
}
@@ -163,7 +163,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_data(
&pdev->dev, "cpufreq-dt", -1, NULL, 0);
if (IS_ERR(cpufreq_dt_pdev)) {
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
return ret;
@@ -176,7 +176,7 @@ static int imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 76f6b3884e6b..7f2680bc9a0f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -478,6 +478,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
+ ret = info->vproc_on_boot;
dev_err(info->cpu_dev,
"invalid Vproc value: %d\n", info->vproc_on_boot);
goto out_disable_inter_clock;
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 36c79580fba2..d5ef3c66c762 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -15,6 +15,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -26,8 +27,6 @@
#define GT_IRQ_STATUS BIT(2)
-#define HZ_PER_KHZ 1000
-
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
@@ -428,7 +427,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -445,7 +444,11 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
if (data->throttle_irq <= 0)
return 0;
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = false;
+ mutex_unlock(&data->throttle_lock);
+
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -465,7 +468,8 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
- irq_set_affinity_hint(data->throttle_irq, NULL);
+ irq_set_affinity_and_hint(data->throttle_irq, NULL);
+ disable_irq_nosync(data->throttle_irq);
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 6dfa86971a75..863548f59c3e 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -55,9 +55,7 @@ struct qcom_cpufreq_match_data {
};
struct qcom_cpufreq_drv {
- struct opp_table **names_opp_tables;
- struct opp_table **hw_opp_tables;
- struct opp_table **genpd_opp_tables;
+ int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
};
@@ -315,72 +313,43 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- drv->names_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->names_opp_tables),
+ drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
GFP_KERNEL);
- if (!drv->names_opp_tables) {
+ if (!drv->opp_tokens) {
ret = -ENOMEM;
goto free_drv;
}
- drv->hw_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->hw_opp_tables),
- GFP_KERNEL);
- if (!drv->hw_opp_tables) {
- ret = -ENOMEM;
- goto free_opp_names;
- }
-
- drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->genpd_opp_tables),
- GFP_KERNEL);
- if (!drv->genpd_opp_tables) {
- ret = -ENOMEM;
- goto free_opp;
- }
for_each_possible_cpu(cpu) {
+ struct dev_pm_opp_config config = {
+ .supported_hw = NULL,
+ };
+
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
ret = -ENODEV;
- goto free_genpd_opp;
+ goto free_opp;
}
if (drv->data->get_version) {
+ config.supported_hw = &drv->versions;
+ config.supported_hw_count = 1;
- if (pvs_name) {
- drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name(
- cpu_dev,
- pvs_name);
- if (IS_ERR(drv->names_opp_tables[cpu])) {
- ret = PTR_ERR(drv->names_opp_tables[cpu]);
- dev_err(cpu_dev, "Failed to add OPP name %s\n",
- pvs_name);
- goto free_opp;
- }
- }
-
- drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw(
- cpu_dev, &drv->versions, 1);
- if (IS_ERR(drv->hw_opp_tables[cpu])) {
- ret = PTR_ERR(drv->hw_opp_tables[cpu]);
- dev_err(cpu_dev,
- "Failed to set supported hardware\n");
- goto free_genpd_opp;
- }
+ if (pvs_name)
+ config.prop_name = pvs_name;
}
if (drv->data->genpd_names) {
- drv->genpd_opp_tables[cpu] =
- dev_pm_opp_attach_genpd(cpu_dev,
- drv->data->genpd_names,
- NULL);
- if (IS_ERR(drv->genpd_opp_tables[cpu])) {
- ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
- if (ret != -EPROBE_DEFER)
- dev_err(cpu_dev,
- "Could not attach to pm_domain: %d\n",
- ret);
- goto free_genpd_opp;
+ config.genpd_names = drv->data->genpd_names;
+ config.virt_devs = NULL;
+ }
+
+ if (config.supported_hw || config.genpd_names) {
+ drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->opp_tokens[cpu] < 0) {
+ ret = drv->opp_tokens[cpu];
+ dev_err(cpu_dev, "Failed to set OPP config\n");
+ goto free_opp;
}
}
}
@@ -395,27 +364,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
-free_genpd_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->genpd_opp_tables[cpu]))
- break;
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
- kfree(drv->genpd_opp_tables);
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->names_opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
- }
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->hw_opp_tables[cpu]))
- break;
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- }
- kfree(drv->hw_opp_tables);
-free_opp_names:
- kfree(drv->names_opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+ kfree(drv->opp_tokens);
free_drv:
kfree(drv);
@@ -429,15 +381,10 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
- dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
- kfree(drv->names_opp_tables);
- kfree(drv->hw_opp_tables);
- kfree(drv->genpd_opp_tables);
+ kfree(drv->opp_tokens);
kfree(drv);
return 0;
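
Collapsing three per-CPU opp_table arrays into one token array works because dev_pm_opp_set_config() applies the prop name, supported-hardware mask, and genpd attachment in a single call, and dev_pm_opp_clear_config() unwinds whatever subset was set. A minimal sketch; the pvs name is hypothetical:

static int example_opp_config(struct device *cpu_dev, u32 *versions,
			      const char * const *genpd_names)
{
	struct dev_pm_opp_config config = {
		.supported_hw = versions,
		.supported_hw_count = 1,
		.prop_name = "speed0-pvs1-v2",	/* hypothetical pvs name */
		.genpd_names = genpd_names,
	};
	int token = dev_pm_opp_set_config(cpu_dev, &config);

	if (token < 0)
		return token;		/* negative errno */
	/* later: dev_pm_opp_clear_config(token); */
	return token;
}
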
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index fdb0a722d881..a67df90848c2 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -156,9 +156,13 @@ static int sti_cpufreq_set_opp_info(void)
unsigned int hw_info_offset;
unsigned int version[VERSION_ELEMENTS];
int pcode, substrate, major, minor;
- int ret;
+ int opp_token, ret;
char name[MAX_PCODE_NAME_LEN];
- struct opp_table *opp_table;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ .prop_name = name,
+ };
reg_fields = sti_cpufreq_match();
if (!reg_fields) {
@@ -210,21 +214,14 @@ use_defaults:
snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
- opp_table = dev_pm_opp_set_prop_name(dev, name);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set prop name\n");
- return PTR_ERR(opp_table);
- }
-
version[0] = BIT(major);
version[1] = BIT(minor);
version[2] = BIT(substrate);
- opp_table = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set supported hardware\n");
- ret = PTR_ERR(opp_table);
- goto err_put_prop_name;
+ opp_token = dev_pm_opp_set_config(dev, &config);
+ if (opp_token < 0) {
+ dev_err(dev, "Failed to set OPP config\n");
+ return opp_token;
}
dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
@@ -233,10 +230,6 @@ use_defaults:
version[0], version[1], version[2]);
return 0;
-
-err_put_prop_name:
- dev_pm_opp_put_prop_name(opp_table);
- return ret;
}
static int sti_cpufreq_fetch_syscon_registers(void)
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 75e1bf3a08f7..a4922580ce06 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -86,20 +86,20 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
- struct opp_table **opp_tables;
+ int *opp_tokens;
char name[MAX_NAME_LEN];
unsigned int cpu;
u32 speed = 0;
int ret;
- opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
+ opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
GFP_KERNEL);
- if (!opp_tables)
+ if (!opp_tokens)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
if (ret) {
- kfree(opp_tables);
+ kfree(opp_tokens);
return ret;
}
@@ -113,9 +113,9 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (IS_ERR(opp_tables[cpu])) {
- ret = PTR_ERR(opp_tables[cpu]);
+ opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
+ if (opp_tokens[cpu] < 0) {
+ ret = opp_tokens[cpu];
pr_err("Failed to set prop name\n");
goto free_opp;
}
@@ -124,7 +124,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
if (!IS_ERR(cpufreq_dt_pdev)) {
- platform_set_drvdata(pdev, opp_tables);
+ platform_set_drvdata(pdev, opp_tokens);
return 0;
}
@@ -132,27 +132,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
- }
- kfree(opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ kfree(opp_tokens);
return ret;
}
static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
- struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ int *opp_tokens = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
- kfree(opp_tables);
+ kfree(opp_tokens);
return 0;
}
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 2a6a98764a8c..1216046cf4c2 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -162,7 +162,7 @@ static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
@@ -430,7 +430,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
};
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index e8db3d75be25..ab7ac7df9e62 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,9 +32,9 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
-static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+static void tegra20_cpufreq_put_supported_hw(void *opp_token)
{
- dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_supported_hw((unsigned long) opp_token);
}
static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
@@ -45,7 +45,6 @@ static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
struct device *cpu_dev;
u32 versions[2];
int err;
@@ -71,16 +70,15 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
if (WARN_ON(!cpu_dev))
return -ENODEV;
- opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
- err = PTR_ERR_OR_ZERO(opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ if (err < 0) {
dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
return err;
}
err = devm_add_action_or_reset(&pdev->dev,
tegra20_cpufreq_put_supported_hw,
- opp_table);
+ (void *)((unsigned long) err));
if (err)
return err;
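
Since the supported-hw handle is now an int token rather than a pointer, it gets smuggled through the devm action's void * cookie via unsigned long casts. The general shape of that idiom, with generic names:

static void put_token_action(void *cookie)
{
	int token = (int)(unsigned long)cookie;

	dev_pm_opp_put_supported_hw(token);
}

static int stash_token(struct device *dev, int token)
{
	return devm_add_action_or_reset(dev, put_token_action,
					(void *)(unsigned long)token);
}
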
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 8f9fdd864391..df85a77d476b 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -60,7 +60,6 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
- struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -173,7 +172,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
* seems to always read as 0).
*/
-static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+static const char * const omap3_reg_names[] = {"cpu0", "vbb", NULL};
static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.reg_names = omap3_reg_names,
@@ -324,10 +323,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
const struct of_device_id *match;
- struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const default_reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb", NULL};
int ret;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ };
match = dev_get_platdata(&pdev->dev);
if (!match)
@@ -370,33 +372,21 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto fail_put_node;
- ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT);
- if (IS_ERR(ti_opp_table)) {
- dev_err(opp_data->cpu_dev,
- "Failed to set supported hardware\n");
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
-
- opp_data->opp_table = ti_opp_table;
-
if (opp_data->soc_data->multi_regulator) {
- const char * const *reg_names = default_reg_names;
-
if (opp_data->soc_data->reg_names)
- reg_names = opp_data->soc_data->reg_names;
- ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
- reg_names,
- ARRAY_SIZE(default_reg_names));
- if (IS_ERR(ti_opp_table)) {
- dev_pm_opp_put_supported_hw(opp_data->opp_table);
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
+ config.regulator_names = opp_data->soc_data->reg_names;
+ else
+ config.regulator_names = default_reg_names;
+ }
+
+ ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ if (ret < 0) {
+ dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ goto fail_put_node;
}
of_node_put(opp_data->opp_node);
+
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
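
The ti-cpufreq rework is the consolidated form of the same migration: rather than stacking dev_pm_opp_set_supported_hw() and dev_pm_opp_set_regulators() with separate unwind paths, the driver fills one struct dev_pm_opp_config and makes a single call. A sketch of the shape (field values illustrative, not a drop-in):

#include <linux/pm_opp.h>

static int opp_configure(struct device *cpu_dev, u32 *version,
			 unsigned int count)
{
	/* NULL-terminated regulator list, matching the convention above */
	static const char * const reg_names[] = { "vdd", "vbb", NULL };
	struct dev_pm_opp_config config = {
		.supported_hw = version,
		.supported_hw_count = count,
		.regulator_names = reg_names,
	};

	/* one call, one error path, instead of per-facility setters */
	return dev_pm_opp_set_config(cpu_dev, &config);
}
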
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 62dd956025f3..6eceb1988243 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
* This code is licenced under the GPL.
*/
+#include "linux/percpu-defs.h"
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -279,6 +280,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* Shallower states are enabled, so update. */
dev->states_usage[entered_state].above++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, false);
break;
}
} else if (diff > delay) {
@@ -290,8 +292,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency_ns)
+ if (diff - delay >= drv->states[i].target_residency_ns) {
dev->states_usage[entered_state].below++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, true);
+ }
break;
}
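
The two trace_cpu_idle_miss() hooks give the miss accounting a direction: "above" fires when the chosen state was too deep for the idle duration actually observed, "below" when a deeper state's target residency would still have been met. Ignoring the exit-latency correction the real code applies, the classification reduces to the following hedged sketch (names are illustrative, not the kernel's):

#include <linux/types.h>

enum idle_miss { MISS_NONE, MISS_ABOVE, MISS_BELOW };

/* measured_ns: observed idle duration; residencies per the state table */
static enum idle_miss classify_miss(s64 measured_ns,
				    s64 entered_residency_ns,
				    s64 deeper_residency_ns)
{
	if (measured_ns < entered_residency_ns)
		return MISS_ABOVE;	/* entered state was too deep */
	if (measured_ns >= deeper_residency_ns)
		return MISS_BELOW;	/* a deeper state would have fit */
	return MISS_NONE;
}
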
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index f64e3984689f..768ced3d6fe8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -2,6 +2,7 @@
menuconfig CXL_BUS
tristate "CXL (Compute Express Link) Devices Support"
depends on PCI
+ select PCI_DOE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -102,4 +103,12 @@ config CXL_SUSPEND
def_bool y
depends on SUSPEND && CXL_MEM
+config CXL_REGION
+ bool
+ default CXL_BUS
+ # For MAX_PHYSMEM_BITS
+ depends on SPARSEMEM
+ select MEMREGION
+ select GET_FREE_REGION
+
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 40286f5df812..fb649683dd3a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -9,10 +9,6 @@
#include "cxlpci.h"
#include "cxl.h"
-/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
-#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
-#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
-
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
@@ -34,7 +30,8 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
static int cxl_acpi_cfmws_verify(struct device *dev,
struct acpi_cedt_cfmws *cfmws)
{
- int expected_len;
+ int rc, expected_len;
+ unsigned int ways;
if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
@@ -51,14 +48,14 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return -EINVAL;
}
- if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
- dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc) {
+ dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
+ cfmws->interleave_ways);
return -EINVAL;
}
- expected_len = struct_size((cfmws), interleave_targets,
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ expected_len = struct_size(cfmws, interleave_targets, ways);
if (cfmws->header.length < expected_len) {
dev_err(dev, "CFMWS length %d less than expected %d\n",
@@ -76,6 +73,8 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
struct cxl_cfmws_context {
struct device *dev;
struct cxl_port *root_port;
+ struct resource *cxl_res;
+ int id;
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -84,10 +83,14 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
+ struct resource *cxl_res = ctx->cxl_res;
+ struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
- int rc, i;
+ unsigned int ways, i, ig;
+ struct resource *res;
+ int rc;
cfmws = (struct acpi_cedt_cfmws *) header;
@@ -99,19 +102,51 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
return 0;
}
- for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc)
+ return rc;
+ rc = cxl_to_granularity(cfmws->granularity, &ig);
+ if (rc)
+ return rc;
+ for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
- cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
- if (IS_ERR(cxld))
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
+ if (!res->name)
+ goto err_name;
+
+ res->start = cfmws->base_hpa;
+ res->end = cfmws->base_hpa + cfmws->window_size - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* add to the local resource tracking to establish a sort order */
+ rc = insert_resource(cxl_res, res);
+ if (rc)
+ goto err_insert;
+
+ cxlrd = cxl_root_decoder_alloc(root_port, ways);
+ if (IS_ERR(cxlrd))
return 0;
+ cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(cfmws->base_hpa,
- cfmws->window_size);
- cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
- cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+ cxld->hpa_range = (struct range) {
+ .start = res->start,
+ .end = res->end,
+ };
+ cxld->interleave_ways = ways;
+ /*
+ * Minimize the x1 granularity to advertise support for any
+ * valid region granularity
+ */
+ if (ways == 1)
+ ig = CXL_DECODER_MIN_GRANULARITY;
+ cxld->interleave_granularity = ig;
rc = cxl_decoder_add(cxld, target_map);
if (rc)
@@ -119,15 +154,22 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
- dev_err(dev, "Failed to add decoder for %pr\n",
- &cxld->platform_res);
+ dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
}
- dev_dbg(dev, "add: %s node: %d range %pr\n", dev_name(&cxld->dev),
- phys_to_target_node(cxld->platform_res.start),
- &cxld->platform_res);
+ dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+ dev_name(&cxld->dev),
+ phys_to_target_node(cxld->hpa_range.start),
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
+
+err_insert:
+ kfree(res->name);
+err_name:
+ kfree(res);
+ return -ENOMEM;
}
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
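
cxl_to_ways() and cxl_to_granularity() replace the open-coded macros with shared helpers that reject reserved encodings instead of silently decoding them. Their mappings follow from the values the old code produced (ways 1/2/4/8/16 for encodings 0-4 and 3/6/12 for 8-10; granularity 256 << eig); a sketch of plausible implementations, not the canonical ones:

#include <linux/errno.h>
#include <linux/types.h>

static int sketch_cxl_to_ways(u8 eiw, unsigned int *ways)
{
	switch (eiw) {
	case 0 ... 4:
		*ways = 1 << eiw;	/* 1, 2, 4, 8, 16 */
		return 0;
	case 8 ... 10:
		*ways = 3 << (eiw - 8);	/* 3, 6, 12 */
		return 0;
	default:
		return -EINVAL;		/* reserved encoding */
	}
}

static int sketch_cxl_to_granularity(u16 eig, unsigned int *granularity)
{
	/* assume encodings 0..6 (256B..16K) are the valid range */
	if (eig > 6)
		return -EINVAL;
	*granularity = 256 << eig;
	return 0;
}
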
@@ -175,8 +217,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (rc)
return rc;
- port = devm_cxl_add_port(host, match, dport->component_reg_phys,
- root_port);
+ port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
if (IS_ERR(port))
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
@@ -282,9 +323,127 @@ static void cxl_acpi_lock_reset_class(void *dev)
device_lock_reset_class(dev);
}
+static void del_cxl_resource(struct resource *res)
+{
+ kfree(res->name);
+ kfree(res);
+}
+
+static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
+{
+ priv->desc = (unsigned long) pub;
+}
+
+static struct resource *cxl_get_public_resource(struct resource *priv)
+{
+ return (struct resource *) priv->desc;
+}
+
+static void remove_cxl_resources(void *data)
+{
+ struct resource *res, *next, *cxl = data;
+
+ for (res = cxl->child; res; res = next) {
+ struct resource *victim = cxl_get_public_resource(res);
+
+ next = res->sibling;
+ remove_resource(res);
+
+ if (victim) {
+ remove_resource(victim);
+ kfree(victim);
+ }
+
+ del_cxl_resource(res);
+ }
+}
+
+/**
+ * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
+ * @cxl_res: A standalone resource tree where each CXL window is a sibling
+ *
+ * Walk each CXL window in @cxl_res and add it to iomem_resource, potentially
+ * expanding its boundaries to ensure that any conflicting resources become
+ * children. If a window is expanded it may then conflict with another window
+ * entry and require the window to be truncated or trimmed. Consider this
+ * situation:
+ *
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
+ *
+ * ...where platform firmware has established a "System RAM" resource across 2
+ * windows, but has left some portion of window 1 for dynamic CXL region
+ * provisioning. In this case "CXL Window 0" is expanded to span the entirety
+ * of the "System RAM" resource, and "CXL Window 1" is truncated to the
+ * remaining tail past the end of that "System RAM" resource.
+ */
+static int add_cxl_resources(struct resource *cxl_res)
+{
+ struct resource *res, *new, *next;
+
+ for (res = cxl_res->child; res; res = next) {
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->name = res->name;
+ new->start = res->start;
+ new->end = res->end;
+ new->flags = IORESOURCE_MEM;
+ new->desc = IORES_DESC_CXL;
+
+ /*
+ * Record the public resource in the private cxl_res tree for
+ * later removal.
+ */
+ cxl_set_public_resource(res, new);
+
+ insert_resource_expand_to_fit(&iomem_resource, new);
+
+ next = res->sibling;
+ while (next && resource_overlaps(new, next)) {
+ if (resource_contains(new, next)) {
+ struct resource *_next = next->sibling;
+
+ remove_resource(next);
+ del_cxl_resource(next);
+ next = _next;
+ } else
+ next->start = new->end + 1;
+ }
+ }
+ return 0;
+}
+
+static int pair_cxl_resource(struct device *dev, void *data)
+{
+ struct resource *cxl_res = data;
+ struct resource *p;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ for (p = cxl_res->child; p; p = p->sibling) {
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct resource res = {
+ .start = cxld->hpa_range.start,
+ .end = cxld->hpa_range.end,
+ .flags = IORESOURCE_MEM,
+ };
+
+ if (resource_contains(p, &res)) {
+ cxlrd->res = cxl_get_public_resource(p);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
+ struct resource *cxl_res;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@@ -296,6 +455,14 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc)
return rc;
+ cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
+ if (!cxl_res)
+ return -ENOMEM;
+ cxl_res->name = "CXL mem";
+ cxl_res->start = 0;
+ cxl_res->end = -1;
+ cxl_res->flags = IORESOURCE_MEM;
+
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
@@ -306,11 +473,28 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+ rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
+ if (rc)
+ return rc;
+
ctx = (struct cxl_cfmws_context) {
.dev = host,
.root_port = root_port,
+ .cxl_res = cxl_res,
};
- acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ if (rc < 0)
+ return -ENXIO;
+
+ rc = add_cxl_resources(cxl_res);
+ if (rc)
+ return rc;
+
+ /*
+ * Populate the root decoders with their related iomem resource,
+ * if present
+ */
+ device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
@@ -337,12 +521,19 @@ static const struct acpi_device_id cxl_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
+static const struct platform_device_id cxl_test_ids[] = {
+ { "cxl_acpi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, cxl_test_ids);
+
static struct platform_driver cxl_acpi_driver = {
.probe = cxl_acpi_probe,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = cxl_acpi_ids,
},
+ .id_table = cxl_test_ids,
};
module_platform_driver(cxl_acpi_driver);
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9d35085d25af..79c7257f4107 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -10,3 +10,4 @@ cxl_core-y += memdev.o
cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
+cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 1a50c0fc399c..1d8f87be283f 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -9,6 +9,36 @@ extern const struct device_type cxl_nvdimm_type;
extern struct attribute_group cxl_base_attribute_group;
+#ifdef CONFIG_CXL_REGION
+extern struct device_attribute dev_attr_create_pmem_region;
+extern struct device_attribute dev_attr_delete_region;
+extern struct device_attribute dev_attr_region;
+extern const struct device_type cxl_pmem_region_type;
+extern const struct device_type cxl_region_type;
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
+#define CXL_REGION_TYPE(x) (&cxl_region_type)
+#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
+#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
+int cxl_region_init(void);
+void cxl_region_exit(void);
+#else
+static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+}
+static inline int cxl_region_init(void)
+{
+ return 0;
+}
+static inline void cxl_region_exit(void)
+{
+}
+#define CXL_REGION_ATTR(x) NULL
+#define CXL_REGION_TYPE(x) NULL
+#define SET_CXL_REGION_ATTR(x)
+#define CXL_PMEM_REGION_TYPE(x) NULL
+#endif
+
struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_memdev *cxlmd,
@@ -17,9 +47,28 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+extern struct rw_semaphore cxl_dpa_rwsem;
+
+bool is_switch_decoder(struct device *dev);
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
+ struct cxl_memdev *cxlmd)
+{
+ if (!port)
+ return NULL;
+
+ return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
+}
+
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);
-void cxl_mbox_exit(void);
#endif /* __CXL_CORE_H__ */
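
cxl_ep_load() above leans on a common kernel idiom: an xarray indexed by the object pointer itself, so lookup needs no separate key allocation. A self-contained sketch of that idiom (registry and the helper names are illustrative):

#include <linux/device.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(registry);

static int registry_add(struct device *dev, void *payload)
{
	/* the pointer value doubles as the index */
	return xa_insert(&registry, (unsigned long)dev, payload, GFP_KERNEL);
}

static void *registry_find(struct device *dev)
{
	return xa_load(&registry, (unsigned long)dev);
}
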
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index bfc8ee876278..d1d2caea5c62 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -16,6 +17,8 @@
* for enumerating these registers and capabilities.
*/
+DECLARE_RWSEM(cxl_dpa_rwsem);
+
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map)
{
@@ -46,20 +49,22 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
*/
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
- struct cxl_decoder *cxld;
- struct cxl_dport *dport;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_dport *dport = NULL;
int single_port_map[1];
+ unsigned long index;
- cxld = cxl_switch_decoder_alloc(port, 1);
- if (IS_ERR(cxld))
- return PTR_ERR(cxld);
+ cxlsd = cxl_switch_decoder_alloc(port, 1);
+ if (IS_ERR(cxlsd))
+ return PTR_ERR(cxlsd);
device_lock_assert(&port->dev);
- dport = list_first_entry(&port->dports, typeof(*dport), list);
+ xa_for_each(&port->dports, index, dport)
+ break;
single_port_map[0] = dport->port_id;
- return add_hdm_decoder(port, cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
@@ -124,47 +129,577 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
return ERR_PTR(-ENXIO);
}
+ dev_set_drvdata(dev, cxlhdm);
+
return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
-static int to_interleave_granularity(u32 ctrl)
+static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
+{
+ unsigned long long start = r->start, end = r->end;
+
+ seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
+ r->name);
+}
+
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
+{
+ struct resource *p1, *p2;
+
+ down_read(&cxl_dpa_rwsem);
+ for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
+ __cxl_dpa_debug(file, p1, 0);
+ for (p2 = p1->child; p2; p2 = p2->sibling)
+ __cxl_dpa_debug(file, p2, 1);
+ }
+ up_read(&cxl_dpa_rwsem);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+
+/*
+ * Must be called in a context that synchronizes against this decoder's
+ * port ->remove() callback (like an endpoint decoder sysfs attribute)
+ */
+static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct resource *res = cxled->dpa_res;
+ resource_size_t skip_start;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ /* save @skip_start, before @res is released */
+ skip_start = res->start - cxled->skip;
+ __release_region(&cxlds->dpa_res, res->start, resource_size(res));
+ if (cxled->skip)
+ __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ cxled->skip = 0;
+ cxled->dpa_res = NULL;
+ put_device(&cxled->cxld.dev);
+ port->hdm_end--;
+}
+
+static void cxl_dpa_release(void *cxled)
+{
+ down_write(&cxl_dpa_rwsem);
+ __cxl_dpa_release(cxled);
+ up_write(&cxl_dpa_rwsem);
+}
+
+/*
+ * Must be called from context that will not race port device
+ * unregistration, like decoder sysfs attribute methods
+ */
+static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+ devm_remove_action(&port->dev, cxl_dpa_release, cxled);
+ __cxl_dpa_release(cxled);
+}
+
+static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &port->dev;
+ struct resource *res;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ if (!len)
+ goto success;
+
+ if (cxled->dpa_res) {
+ dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
+ return -EBUSY;
+ }
+
+ if (port->hdm_end + 1 != cxled->cxld.id) {
+ /*
+ * Assumes alloc and commit order is always in hardware instance
+ * order per the expectations of CXL 2.0 8.2.5.12.20 Committing
+ * Decoder Programming, which requires decoder[m] to be
+ * committed before decoder[m+1] commit starts.
+ */
+ dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+ cxled->cxld.id, port->id, port->hdm_end + 1);
+ return -EBUSY;
+ }
+
+ if (skipped) {
+ res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev,
+ "decoder%d.%d: failed to reserve skipped space\n",
+ port->id, cxled->cxld.id);
+ return -EBUSY;
+ }
+ }
+ res = __request_region(&cxlds->dpa_res, base, len,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
+ port->id, cxled->cxld.id);
+ if (skipped)
+ __release_region(&cxlds->dpa_res, base - skipped,
+ skipped);
+ return -EBUSY;
+ }
+ cxled->dpa_res = res;
+ cxled->skip = skipped;
+
+ if (resource_contains(&cxlds->pmem_res, res))
+ cxled->mode = CXL_DECODER_PMEM;
+ else if (resource_contains(&cxlds->ram_res, res))
+ cxled->mode = CXL_DECODER_RAM;
+ else {
+ dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
+ cxled->cxld.id, cxled->dpa_res);
+ cxled->mode = CXL_DECODER_MIXED;
+ }
+
+success:
+ port->hdm_end++;
+ get_device(&cxled->cxld.dev);
+ return 0;
+}
+
+static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
- return 256 << val;
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
-static int to_interleave_ways(u32 ctrl)
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);
+ resource_size_t size = 0;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ size = resource_size(cxled->dpa_res);
+ up_read(&cxl_dpa_rwsem);
- switch (val) {
- case 0 ... 4:
- return 1 << val;
- case 8 ... 10:
- return 3 << (val - 8);
+ return size;
+}
+
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
+{
+ resource_size_t base = -1;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ base = cxled->dpa_res->start;
+ up_read(&cxl_dpa_rwsem);
+
+ return base;
+}
+
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (!cxled->dpa_res) {
+ rc = 0;
+ goto out;
+ }
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder assigned to: %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.id != port->hdm_end) {
+ dev_dbg(dev, "expected decoder%d.%d\n", port->id,
+ port->hdm_end);
+ rc = -EBUSY;
+ goto out;
+ }
+ devm_cxl_dpa_release(cxled);
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+ return rc;
+}
+
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
default:
+ dev_dbg(dev, "unsupported mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Only allow modes that are supported by the current partition
+ * configuration
+ */
+ if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
+ dev_dbg(dev, "no available pmem capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
+ dev_dbg(dev, "no available ram capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled->mode = mode;
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ return rc;
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ resource_size_t free_ram_start, free_pmem_start;
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ resource_size_t start, avail, skip;
+ struct resource *p, *last;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder attached to %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_ram_start = last->end + 1;
+ else
+ free_ram_start = cxlds->ram_res.start;
+
+ for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_pmem_start = last->end + 1;
+ else
+ free_pmem_start = cxlds->pmem_res.start;
+
+ if (cxled->mode == CXL_DECODER_RAM) {
+ start = free_ram_start;
+ avail = cxlds->ram_res.end - start + 1;
+ skip = 0;
+ } else if (cxled->mode == CXL_DECODER_PMEM) {
+ resource_size_t skip_start, skip_end;
+
+ start = free_pmem_start;
+ avail = cxlds->pmem_res.end - start + 1;
+ skip_start = free_ram_start;
+
+ /*
+ * If some pmem is already allocated, then that allocation
+ * already handled the skip.
+ */
+ if (cxlds->pmem_res.child &&
+ skip_start == cxlds->pmem_res.child->start)
+ skip_end = skip_start - 1;
+ else
+ skip_end = start - 1;
+ skip = skip_end - skip_start + 1;
+ } else {
+ dev_dbg(dev, "mode not set\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (size > avail) {
+ dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
+ cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
+ &avail);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ rc = __cxl_dpa_reserve(cxled, start, size, skip);
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
+}
+
+static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u16 eig;
+ u8 eiw;
+
+ /*
+ * Input validation ensures these warnings never fire, but otherwise
+ * suppress uninitialized variable usage warnings.
+ */
+ if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
+ "invalid interleave_ways: %d\n", cxld->interleave_ways))
+ return;
+ if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
+ "invalid interleave_granularity: %d\n",
+ cxld->interleave_granularity))
+ return;
+
+ u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
+ u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
+ *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
+}
+
+static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
+ CXL_HDM_DECODER0_CTRL_TYPE);
+}
+
+static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+{
+ struct cxl_dport **t = &cxlsd->target[0];
+ int ways = cxlsd->cxld.interleave_ways;
+
+ if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+ ways > 8 || ways > cxlsd->nr_targets,
+ "ways: %d overflows targets: %d\n", ways,
+ cxlsd->nr_targets))
+ return -ENXIO;
+
+ *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ if (ways > 1)
+ *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+ if (ways > 2)
+ *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
+ if (ways > 3)
+ *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
+ if (ways > 4)
+ *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
+ if (ways > 5)
+ *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
+ if (ways > 6)
+ *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ if (ways > 7)
+ *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+
+ return 0;
+}
+
+/*
+ * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
+ * committed or error within 10ms; allow a generous 20ms to account for
+ * clock skew and other marginal behavior.
+ */
+#define COMMIT_TIMEOUT_MS 20
+static int cxld_await_commit(void __iomem *hdm, int id)
+{
+ u32 ctrl;
+ int i;
+
+ for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ return -EIO;
+ }
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return 0;
+ fsleep(1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+ u64 base, size;
+ u32 ctrl;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (port->commit_end + 1 != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ return -EBUSY;
+ }
+
+ down_read(&cxl_dpa_rwsem);
+ /* common decoder settings */
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+ cxld_set_interleave(cxld, &ctrl);
+ cxld_set_type(cxld, &ctrl);
+ base = cxld->hpa_range.start;
+ size = range_len(&cxld->hpa_range);
+
+ writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+ void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
+ void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ u64 targets;
+
+ rc = cxlsd_set_targets(cxlsd, &targets);
+ if (rc) {
+ dev_dbg(&port->dev, "%s: target configuration error\n",
+ dev_name(&cxld->dev));
+ goto err;
+ }
+
+ writel(upper_32_bits(targets), tl_hi);
+ writel(lower_32_bits(targets), tl_lo);
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
+ void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
+
+ writel(upper_32_bits(cxled->skip), sk_hi);
+ writel(lower_32_bits(cxled->skip), sk_lo);
+ }
+
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end++;
+ rc = cxld_await_commit(hdm, cxld->id);
+err:
+ if (rc) {
+ dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ dev_name(&cxld->dev), rc);
+ cxld->reset(cxld);
+ return rc;
+ }
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static int cxl_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id;
+ u32 ctrl;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return 0;
+
+ if (port->commit_end != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ return -EBUSY;
}
+
+ down_read(&cxl_dpa_rwsem);
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end--;
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which)
+ int *target_map, void __iomem *hdm, int which,
+ u64 *dpa_base)
{
- u64 size, base;
+ struct cxl_endpoint_decoder *cxled = NULL;
+ u64 size, base, skip, dpa_size;
+ bool committed;
+ u32 remainder;
+ int i, rc;
u32 ctrl;
- int i;
union {
u64 value;
unsigned char target_id[8];
} target_list;
+ if (is_endpoint_decoder(&cxld->dev))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
+ cxld->commit = cxl_decoder_commit;
+ cxld->reset = cxl_decoder_reset;
- if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
+ if (!committed)
size = 0;
if (base == U64_MAX || size == U64_MAX) {
dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
@@ -172,39 +707,77 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
- cxld->decoder_range = (struct range) {
+ cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
};
- /* switch decoders are always enabled if committed */
- if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
+ /* decoders are enabled if committed */
+ if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ else
+ cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (cxld->id != port->commit_end + 1) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed out of order\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
+ port->commit_end = cxld->id;
+ } else {
+ /* unless / until type-2 drivers arrive, assume type-3 */
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
+ ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
+ }
+ cxld->target_type = CXL_DECODER_EXPANDER;
}
- cxld->interleave_ways = to_interleave_ways(ctrl);
- if (!cxld->interleave_ways) {
+ rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
+ &cxld->interleave_ways);
+ if (rc) {
dev_warn(&port->dev,
"decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
port->id, cxld->id, ctrl);
- return -ENXIO;
+ return rc;
}
- cxld->interleave_granularity = to_interleave_granularity(ctrl);
+ rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
+ &cxld->interleave_granularity);
+ if (rc)
+ return rc;
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
- cxld->target_type = CXL_DECODER_EXPANDER;
- else
- cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (!cxled) {
+ target_list.value =
+ ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ for (i = 0; i < cxld->interleave_ways; i++)
+ target_map[i] = target_list.target_id[i];
- if (is_endpoint_decoder(&cxld->dev))
return 0;
+ }
- target_list.value =
- ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
- for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ if (!committed)
+ return 0;
+ dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
+ if (remainder) {
+ dev_err(&port->dev,
+ "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
+ port->id, cxld->id, size, cxld->interleave_ways);
+ return -ENXIO;
+ }
+ skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base,
+ *dpa_base + dpa_size + skip - 1, rc);
+ return rc;
+ }
+ *dpa_base += dpa_size + skip;
return 0;
}
@@ -216,7 +789,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
- int i, committed, failed;
+ int i, committed;
+ u64 dpa_base = 0;
u32 ctrl;
/*
@@ -236,27 +810,37 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
if (committed != cxlhdm->decoder_count)
msleep(20);
- for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
- if (is_cxl_endpoint(port))
- cxld = cxl_endpoint_decoder_alloc(port);
- else
- cxld = cxl_switch_decoder_alloc(port, target_count);
- if (IS_ERR(cxld)) {
- dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
- return PTR_ERR(cxld);
+ if (is_cxl_endpoint(port)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
+ } else {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map,
- cxlhdm->regs.hdm_decoder, i);
+ rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
if (rc) {
put_device(&cxld->dev);
- failed++;
- continue;
+ return rc;
}
rc = add_hdm_decoder(port, cxld, target_map);
if (rc) {
@@ -266,11 +850,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
}
}
- if (failed == cxlhdm->decoder_count) {
- dev_err(&port->dev, "No valid decoders found\n");
- return -ENXIO;
- }
-
return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
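
The subtle piece of cxl_dpa_alloc() above is the skip computation: a pmem allocation must first reserve any still-free ram span so that decoder DPA ranges stay in device order. As a worked example under that scheme, take ram_res = [0, 1G) and pmem_res = [1G, 2G) with nothing allocated: the first pmem allocation starts at 1G and skips the full 1G of free ram; a second pmem allocation then sees pmem_res.child starting at free_ram_start and computes a skip of zero. The first case, in a hedged sketch:

#include <linux/sizes.h>
#include <linux/types.h>

/* First pmem allocation with ram_res = [0, 1G), pmem_res = [1G, 2G) */
static resource_size_t first_pmem_skip(void)
{
	resource_size_t start = SZ_1G;		/* free_pmem_start */
	resource_size_t skip_start = 0;		/* free_ram_start */
	resource_size_t skip_end = start - 1;	/* no pmem child handled it */

	return skip_end - skip_start + 1;	/* 1G: covers all free ram */
}
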
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index cbf23beebebe..16176b9278b4 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -718,12 +718,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
*/
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
- struct cxl_mbox_get_partition_info {
- __le64 active_volatile_cap;
- __le64 active_persistent_cap;
- __le64 next_volatile_cap;
- __le64 next_persistent_cap;
- } __packed pi;
+ struct cxl_mbox_get_partition_info pi;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
@@ -773,15 +768,6 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlds->dev,
- "Identify Memory Device\n"
- " total_bytes = %#llx\n"
- " volatile_only_bytes = %#llx\n"
- " persistent_only_bytes = %#llx\n"
- " partition_align_bytes = %#llx\n",
- cxlds->total_bytes, cxlds->volatile_only_bytes,
- cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
-
cxlds->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -789,42 +775,63 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
{
int rc;
- if (cxlds->partition_align_bytes == 0) {
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
- cxlds->pmem_range.start = cxlds->volatile_only_bytes;
- cxlds->pmem_range.end = cxlds->volatile_only_bytes +
- cxlds->persistent_only_bytes - 1;
+ res->name = type;
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = IORESOURCE_MEM;
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
return 0;
}
-
- rc = cxl_mem_get_partition_info(cxlds);
+ rc = request_resource(parent, res);
if (rc) {
- dev_err(cxlds->dev, "Failed to query partition information\n");
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
return rc;
}
- dev_dbg(cxlds->dev,
- "Get Partition Info\n"
- " active_volatile_bytes = %#llx\n"
- " active_persistent_bytes = %#llx\n"
- " next_volatile_bytes = %#llx\n"
- " next_persistent_bytes = %#llx\n",
- cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
- cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ int rc;
- cxlds->pmem_range.start = cxlds->active_volatile_bytes;
- cxlds->pmem_range.end =
- cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
+ cxlds->dpa_res =
+ (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
- return 0;
+ if (cxlds->partition_align_bytes == 0) {
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->volatile_only_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, "pmem");
+ }
+
+ rc = cxl_mem_get_partition_info(cxlds);
+ if (rc) {
+ dev_err(dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->active_volatile_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->active_volatile_bytes,
+ cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
@@ -845,19 +852,11 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
-static struct dentry *cxl_debugfs;
-
void __init cxl_mbox_init(void)
{
struct dentry *mbox_debugfs;
- cxl_debugfs = debugfs_create_dir("cxl", NULL);
- mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
+ mbox_debugfs = cxl_debugfs_create_dir("mbox");
debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
&cxl_raw_allow_all);
}
-
-void cxl_mbox_exit(void)
-{
- debugfs_remove_recursive(cxl_debugfs);
-}
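
With the range-to-resource conversion, cxl_mem_create_range_info() lays ram and pmem out back to back inside dpa_res. For a hypothetical device with 256MB volatile-only and 256MB persistent-only capacity and no partition alignment, cxl_dpa_debug() would render something like (illustrative, not captured output):

00000000-0fffffff : ram
10000000-1fffffff : pmem
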
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f7cdcd33504a..20ce488a7754 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -68,7 +68,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->ram_range);
+ unsigned long long len = resource_size(&cxlds->ram_res);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -81,7 +81,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->pmem_range);
+ unsigned long long len = resource_size(&cxlds->pmem_res);
return sysfs_emit(buf, "%#llx\n", len);
}
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c4c99ff7b55e..9240df53ed87 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -225,7 +226,6 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
{
struct range *dev_range = arg;
struct cxl_decoder *cxld;
- struct range root_range;
if (!is_root_decoder(dev))
return 0;
@@ -237,12 +237,7 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
if (!(cxld->flags & CXL_DECODER_F_RAM))
return 0;
- root_range = (struct range) {
- .start = cxld->platform_res.start,
- .end = cxld->platform_res.end,
- };
-
- return range_contains(&root_range, dev_range);
+ return range_contains(&cxld->hpa_range, dev_range);
}
static void disable_hdm(void *_cxlhdm)
@@ -458,3 +453,175 @@ hdm_init:
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE 0x0000ff00
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA 0
+#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE 0xffff0000
+#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff
+#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
+
+static struct pci_doe_mb *find_cdat_doe(struct device *uport)
+{
+ struct cxl_memdev *cxlmd;
+ struct cxl_dev_state *cxlds;
+ unsigned long index;
+ void *entry;
+
+ cxlmd = to_cxl_memdev(uport);
+ cxlds = cxlmd->cxlds;
+
+ xa_for_each(&cxlds->doe_mbs, index, entry) {
+ struct pci_doe_mb *cur = entry;
+
+ if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS))
+ return cur;
+ }
+
+ return NULL;
+}
+
+#define CDAT_DOE_REQ(entry_handle) \
+ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
+ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
+ CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
+
+static void cxl_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+struct cdat_doe_task {
+ u32 request_pl;
+ u32 response_pl[32];
+ struct completion c;
+ struct pci_doe_task task;
+};
+
+#define DECLARE_CDAT_DOE_TASK(req, cdt) \
+struct cdat_doe_task cdt = { \
+ .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \
+ .request_pl = req, \
+ .task = { \
+ .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \
+ .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \
+ .request_pl = &cdt.request_pl, \
+ .request_pl_sz = sizeof(cdt.request_pl), \
+ .response_pl = cdt.response_pl, \
+ .response_pl_sz = sizeof(cdt.response_pl), \
+ .complete = cxl_doe_task_complete, \
+ .private = &cdt.c, \
+ } \
+}
+
+static int cxl_cdat_get_length(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ size_t *length)
+{
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ if (t.task.rv < sizeof(u32))
+ return -EIO;
+
+ *length = t.response_pl[1];
+ dev_dbg(dev, "CDAT length %zu\n", *length);
+
+ return 0;
+}
+
+static int cxl_cdat_read_table(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ struct cxl_cdat *cdat)
+{
+ size_t length = cdat->length;
+ u32 *data = cdat->table;
+ int entry_handle = 0;
+
+ do {
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ size_t entry_dw;
+ u32 *entry;
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ /* 1 DW header + 1 DW data min */
+ if (t.task.rv < (2 * sizeof(u32)))
+ return -EIO;
+
+ /* Get the CXL table access header entry handle */
+ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
+ t.response_pl[0]);
+ entry = t.response_pl + 1;
+ entry_dw = t.task.rv / sizeof(u32);
+ /* Skip Header */
+ entry_dw -= 1;
+ entry_dw = min(length / sizeof(u32), entry_dw);
+ /* Prevent length < 1 DW from causing a buffer overflow */
+ if (entry_dw) {
+ memcpy(data, entry, entry_dw * sizeof(u32));
+ length -= entry_dw * sizeof(u32);
+ data += entry_dw;
+ }
+ } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+
+ return 0;
+}
+
+/**
+ * read_cdat_data - Read the CDAT data on this port
+ * @port: Port to read data from
+ *
+ * This call will sleep waiting for responses from the DOE mailbox.
+ */
+void read_cdat_data(struct cxl_port *port)
+{
+ struct pci_doe_mb *cdat_doe;
+ struct device *dev = &port->dev;
+ struct device *uport = port->uport;
+ size_t cdat_length;
+ int rc;
+
+ cdat_doe = find_cdat_doe(uport);
+ if (!cdat_doe) {
+ dev_dbg(dev, "No CDAT mailbox\n");
+ return;
+ }
+
+ port->cdat_available = true;
+
+ if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
+ dev_dbg(dev, "No CDAT length\n");
+ return;
+ }
+
+ port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
+ if (!port->cdat.table)
+ return;
+
+ port->cdat.length = cdat_length;
+ rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
+ if (rc) {
+ /* Don't leave table data allocated on error */
+ devm_kfree(dev, port->cdat.table);
+ port->cdat.table = NULL;
+ port->cdat.length = 0;
+ dev_err(dev, "CDAT data read error\n");
+ }
+}
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
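
The table access protocol above paginates by entry handle: bits 31:16 of response DW0 carry the handle of the next entry, and 0xffff marks the end of the table, which is what terminates the cxl_cdat_read_table() loop. Extracting that field with the masks defined above, as a small sketch:

#include <linux/bitfield.h>
#include <linux/types.h>

static bool cdat_last_entry(u32 response_dw0)
{
	u16 handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, response_dw0);

	return handle == CXL_DOE_TABLE_ACCESS_LAST_ENTRY;
}
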
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index bec7cfb54ebf..1d12a8206444 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
{
- struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
+ struct cxl_port *port = find_cxl_root(start);
struct device *dev;
if (!port)
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index dbce99bdffab..bffde862de0b 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/memregion.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,6 +44,8 @@ static int cxl_device_id(struct device *dev)
return CXL_DEVICE_NVDIMM_BRIDGE;
if (dev->type == &cxl_nvdimm_type)
return CXL_DEVICE_NVDIMM;
+ if (dev->type == CXL_PMEM_REGION_TYPE())
+ return CXL_DEVICE_PMEM_REGION;
if (is_cxl_port(dev)) {
if (is_cxl_root(to_cxl_port(dev)))
return CXL_DEVICE_ROOT;
@@ -49,6 +53,8 @@ static int cxl_device_id(struct device *dev)
}
if (is_cxl_memdev(dev))
return CXL_DEVICE_MEMORY_EXPANDER;
+ if (dev->type == CXL_REGION_TYPE())
+ return CXL_DEVICE_REGION;
return 0;
}
@@ -73,14 +79,8 @@ static ssize_t start_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 start;
- if (is_root_decoder(dev))
- start = cxld->platform_res.start;
- else
- start = cxld->decoder_range.start;
-
- return sysfs_emit(buf, "%#llx\n", start);
+ return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);
@@ -88,14 +88,8 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 size;
-
- if (is_root_decoder(dev))
- size = resource_size(&cxld->platform_res);
- else
- size = range_len(&cxld->decoder_range);
- return sysfs_emit(buf, "%#llx\n", size);
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
@@ -131,20 +125,21 @@ static ssize_t target_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_type);
-static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
+static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
ssize_t offset = 0;
int i, rc = 0;
for (i = 0; i < cxld->interleave_ways; i++) {
- struct cxl_dport *dport = cxld->target[i];
+ struct cxl_dport *dport = cxlsd->target[i];
struct cxl_dport *next = NULL;
if (!dport)
break;
if (i + 1 < cxld->interleave_ways)
- next = cxld->target[i + 1];
+ next = cxlsd->target[i + 1];
rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
next ? "," : "");
if (rc < 0)
@@ -158,15 +153,15 @@ static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
static ssize_t target_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
ssize_t offset;
unsigned int seq;
int rc;
do {
- seq = read_seqbegin(&cxld->target_lock);
- rc = emit_target_list(cxld, buf);
- } while (read_seqretry(&cxld->target_lock, seq));
+ seq = read_seqbegin(&cxlsd->target_lock);
+ rc = emit_target_list(cxlsd, buf);
+ } while (read_seqretry(&cxlsd->target_lock, seq));
if (rc < 0)
return rc;
@@ -180,10 +175,121 @@ static ssize_t target_list_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_list);
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ switch (cxled->mode) {
+ case CXL_DECODER_RAM:
+ return sysfs_emit(buf, "ram\n");
+ case CXL_DECODER_PMEM:
+ return sysfs_emit(buf, "pmem\n");
+ case CXL_DECODER_NONE:
+ return sysfs_emit(buf, "none\n");
+ case CXL_DECODER_MIXED:
+ default:
+ return sysfs_emit(buf, "mixed\n");
+ }
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ enum cxl_decoder_mode mode;
+ ssize_t rc;
+
+ if (sysfs_streq(buf, "pmem"))
+ mode = CXL_DECODER_PMEM;
+ else if (sysfs_streq(buf, "ram"))
+ mode = CXL_DECODER_RAM;
+ else
+ return -EINVAL;
+
+ rc = cxl_dpa_set_mode(cxled, mode);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ u64 base = cxl_dpa_resource_start(cxled);
+
+ return sysfs_emit(buf, "%#llx\n", base);
+}
+static DEVICE_ATTR_RO(dpa_resource);
+
+static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ resource_size_t size = cxl_dpa_size(cxled);
+
+ return sysfs_emit(buf, "%pa\n", &size);
+}
+
+static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ unsigned long long size;
+ ssize_t rc;
+
+ rc = kstrtoull(buf, 0, &size);
+ if (rc)
+ return rc;
+
+ if (!IS_ALIGNED(size, SZ_256M))
+ return -EINVAL;
+
+ rc = cxl_dpa_free(cxled);
+ if (rc)
+ return rc;
+
+ if (size == 0)
+ return len;
+
+ rc = cxl_dpa_alloc(cxled, size);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(dpa_size);
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
+}
+
+static DEVICE_ATTR_RO(interleave_granularity);
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
+}
+
+static DEVICE_ATTR_RO(interleave_ways);
+
static struct attribute *cxl_decoder_base_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
&dev_attr_locked.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_interleave_ways.attr,
NULL,
};
@@ -197,11 +303,35 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type2.attr,
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(create_pmem_region)
+ SET_CXL_REGION_ATTR(delete_region)
NULL,
};
+static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
+{
+ unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;
+
+ return (cxlrd->cxlsd.cxld.flags & flags) == flags;
+}
+
+static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_decoder_root_attribute_group = {
.attrs = cxl_decoder_root_attrs,
+ .is_visible = cxl_root_decoder_visible,
};
static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
@@ -214,6 +344,7 @@ static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
static struct attribute *cxl_decoder_switch_attrs[] = {
&dev_attr_target_type.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -230,6 +361,10 @@ static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
static struct attribute *cxl_decoder_endpoint_attrs[] = {
&dev_attr_target_type.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_dpa_size.attr,
+ &dev_attr_dpa_resource.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -244,31 +379,64 @@ static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
NULL,
};
-static void cxl_decoder_release(struct device *dev)
+static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
- struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
ida_free(&port->decoder_ida, cxld->id);
- kfree(cxld);
put_device(&port->dev);
}
+static void cxl_endpoint_decoder_release(struct device *dev)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ __cxl_decoder_release(&cxled->cxld);
+ kfree(cxled);
+}
+
+static void cxl_switch_decoder_release(struct device *dev)
+{
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ __cxl_decoder_release(&cxlsd->cxld);
+ kfree(cxlsd);
+}
+
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
+ "not a cxl_root_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
+
+static void cxl_root_decoder_release(struct device *dev)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (atomic_read(&cxlrd->region_id) >= 0)
+ memregion_free(atomic_read(&cxlrd->region_id));
+ __cxl_decoder_release(&cxlrd->cxlsd.cxld);
+ kfree(cxlrd);
+}
+
static const struct device_type cxl_decoder_endpoint_type = {
.name = "cxl_decoder_endpoint",
- .release = cxl_decoder_release,
+ .release = cxl_endpoint_decoder_release,
.groups = cxl_decoder_endpoint_attribute_groups,
};
static const struct device_type cxl_decoder_switch_type = {
.name = "cxl_decoder_switch",
- .release = cxl_decoder_release,
+ .release = cxl_switch_decoder_release,
.groups = cxl_decoder_switch_attribute_groups,
};
static const struct device_type cxl_decoder_root_type = {
.name = "cxl_decoder_root",
- .release = cxl_decoder_release,
+ .release = cxl_root_decoder_release,
.groups = cxl_decoder_root_attribute_groups,
};
@@ -283,39 +451,63 @@ bool is_root_decoder(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
-bool is_cxl_decoder(struct device *dev)
+bool is_switch_decoder(struct device *dev)
{
- return dev->type && dev->type->release == cxl_decoder_release;
+ return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
- if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+ if (dev_WARN_ONCE(dev,
+ !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
"not a cxl_decoder device\n"))
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
+ "not a cxl_endpoint_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
+
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
+ "not a cxl_switch_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_switch_decoder, cxld.dev);
+}
+
static void cxl_ep_release(struct cxl_ep *ep)
{
- if (!ep)
- return;
- list_del(&ep->list);
put_device(ep->ep);
kfree(ep);
}
+static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
+{
+ if (!ep)
+ return;
+ xa_erase(&port->endpoints, (unsigned long) ep->ep);
+ cxl_ep_release(ep);
+}
+
static void cxl_port_release(struct device *dev)
{
struct cxl_port *port = to_cxl_port(dev);
- struct cxl_ep *ep, *_e;
+ unsigned long index;
+ struct cxl_ep *ep;
- device_lock(dev);
- list_for_each_entry_safe(ep, _e, &port->endpoints, list)
- cxl_ep_release(ep);
- device_unlock(dev);
+ xa_for_each(&port->endpoints, index, ep)
+ cxl_ep_remove(port, ep);
+ xa_destroy(&port->endpoints);
+ xa_destroy(&port->dports);
+ xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
kfree(port);
}
@@ -370,7 +562,7 @@ static void unregister_port(void *_port)
lock_dev = &parent->dev;
device_lock_assert(lock_dev);
- port->uport = NULL;
+ port->dead = true;
device_unregister(&port->dev);
}
@@ -395,7 +587,7 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
@@ -409,6 +601,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
if (rc < 0)
goto err;
port->id = rc;
+ port->uport = uport;
/*
* The top-level cxl_port "cxl_root" does not have a cxl_port as
@@ -417,17 +610,37 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
* description.
*/
dev = &port->dev;
- if (parent_port) {
+ if (parent_dport) {
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_port *iter;
+
dev->parent = &parent_port->dev;
port->depth = parent_port->depth + 1;
+ port->parent_dport = parent_dport;
+
+ /*
+ * walk to the host bridge, or the first ancestor that knows
+ * the host bridge
+ */
+ iter = port;
+ while (!iter->host_bridge &&
+ !is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+ if (iter->host_bridge)
+ port->host_bridge = iter->host_bridge;
+ else
+ port->host_bridge = iter->uport;
+ dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
} else
dev->parent = uport;
- port->uport = uport;
port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
- INIT_LIST_HEAD(&port->dports);
- INIT_LIST_HEAD(&port->endpoints);
+ port->hdm_end = -1;
+ port->commit_end = -1;
+ xa_init(&port->dports);
+ xa_init(&port->endpoints);
+ xa_init(&port->regions);
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -447,24 +660,24 @@ err:
* @host: host device for devm operations
* @uport: "physical" device implementing this upstream port
* @component_reg_phys: (optional) for configurable cxl_port instances
- * @parent_port: next hop up in the CXL memory decode hierarchy
+ * @parent_dport: next hop up in the CXL memory decode hierarchy
*/
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
int rc;
- port = cxl_port_alloc(uport, component_reg_phys, parent_port);
+ port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
if (is_cxl_memdev(uport))
rc = dev_set_name(dev, "endpoint%d", port->id);
- else if (parent_port)
+ else if (parent_dport)
rc = dev_set_name(dev, "port%d", port->id);
else
rc = dev_set_name(dev, "root%d", port->id);
@@ -556,17 +769,13 @@ static int match_root_child(struct device *dev, const void *match)
return 0;
port = to_cxl_port(dev);
- device_lock(dev);
- list_for_each_entry(dport, &port->dports, list) {
- iter = match;
- while (iter) {
- if (iter == dport->dport)
- goto out;
- iter = iter->parent;
- }
+ iter = match;
+ while (iter) {
+ dport = cxl_find_dport_by_dev(port, iter);
+ if (dport)
+ break;
+ iter = iter->parent;
}
-out:
- device_unlock(dev);
return !!iter;
}
@@ -590,9 +799,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
+ unsigned long index;
device_lock_assert(&port->dev);
- list_for_each_entry (dport, &port->dports, list)
+ xa_for_each(&port->dports, index, dport)
if (dport->port_id == id)
return dport;
return NULL;
@@ -604,15 +814,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new)
device_lock_assert(&port->dev);
dup = find_dport(port, new->port_id);
- if (dup)
+ if (dup) {
dev_err(&port->dev,
"unable to add dport%d-%s non-unique port id (%s)\n",
new->port_id, dev_name(new->dport),
dev_name(dup->dport));
- else
- list_add_tail(&new->list, &port->dports);
-
- return dup ? -EEXIST : 0;
+ return -EBUSY;
+ }
+ return xa_insert(&port->dports, (unsigned long)new->dport, new,
+ GFP_KERNEL);
}
/*
@@ -639,10 +849,8 @@ static void cxl_dport_remove(void *data)
struct cxl_dport *dport = data;
struct cxl_port *port = dport->port;
+ xa_erase(&port->dports, (unsigned long) dport->dport);
put_device(dport->dport);
- cond_cxl_root_lock(port);
- list_del(&dport->list);
- cond_cxl_root_unlock(port);
}
static void cxl_dport_unlink(void *data)
@@ -694,7 +902,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
if (!dport)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dport->list);
dport->dport = dport_dev;
dport->port_id = port_id;
dport->component_reg_phys = component_reg_phys;
@@ -723,44 +930,33 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
-static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
-{
- struct cxl_ep *ep;
-
- device_lock_assert(&port->dev);
- list_for_each_entry(ep, &port->endpoints, list)
- if (ep->ep == ep_dev)
- return ep;
- return NULL;
-}
-
-static int add_ep(struct cxl_port *port, struct cxl_ep *new)
+static int add_ep(struct cxl_ep *new)
{
- struct cxl_ep *dup;
+ struct cxl_port *port = new->dport->port;
+ int rc;
device_lock(&port->dev);
if (port->dead) {
device_unlock(&port->dev);
return -ENXIO;
}
- dup = find_ep(port, new->ep);
- if (!dup)
- list_add_tail(&new->list, &port->endpoints);
+ rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
+ GFP_KERNEL);
device_unlock(&port->dev);
- return dup ? -EEXIST : 0;
+ return rc;
}
/**
* cxl_add_ep - register an endpoint's interest in a port
- * @port: a port in the endpoint's topology ancestry
+ * @dport: the dport that routes to @ep_dev
* @ep_dev: device representing the endpoint
*
* Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart, the port can be destroyed once all
* endpoints that care about that port have been removed.
*/
-static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
+static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
struct cxl_ep *ep;
int rc;
@@ -769,10 +965,10 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
if (!ep)
return -ENOMEM;
- INIT_LIST_HEAD(&ep->list);
ep->ep = get_device(ep_dev);
+ ep->dport = dport;
- rc = add_ep(port, ep);
+ rc = add_ep(ep);
if (rc)
cxl_ep_release(ep);
return rc;
@@ -781,11 +977,13 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
struct cxl_find_port_ctx {
const struct device *dport_dev;
const struct cxl_port *parent_port;
+ struct cxl_dport **dport;
};
static int match_port_by_dport(struct device *dev, const void *data)
{
const struct cxl_find_port_ctx *ctx = data;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!is_cxl_port(dev))
@@ -794,7 +992,10 @@ static int match_port_by_dport(struct device *dev, const void *data)
return 0;
port = to_cxl_port(dev);
- return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
+ dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
+ if (ctx->dport)
+ *ctx->dport = dport;
+ return dport != NULL;
}
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
@@ -810,24 +1011,32 @@ static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
return NULL;
}
-static struct cxl_port *find_cxl_port(struct device *dport_dev)
+static struct cxl_port *find_cxl_port(struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev)
+ struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
.parent_port = parent_port,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
/*
@@ -851,13 +1060,13 @@ static void delete_endpoint(void *data)
struct cxl_port *parent_port;
struct device *parent;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, NULL);
if (!parent_port)
goto out;
parent = &parent_port->dev;
device_lock(parent);
- if (parent->driver && endpoint->uport) {
+ if (parent->driver && !endpoint->dead) {
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
@@ -883,21 +1092,70 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
* for a port to be unregistered is when all memdevs beneath that port have gone
* through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
- * devm action registration order.
+ * devm action registration order, and for dports to have already been
+ * destroyed by reap_dports().
*/
-static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
+static void delete_switch_port(struct cxl_port *port)
+{
+ devm_release_action(port->dev.parent, cxl_unlink_uport, port);
+ devm_release_action(port->dev.parent, unregister_port, port);
+}
+
+static void reap_dports(struct cxl_port *port)
{
- struct cxl_dport *dport, *_d;
+ struct cxl_dport *dport;
+ unsigned long index;
+
+ device_lock_assert(&port->dev);
- list_for_each_entry_safe(dport, _d, dports, list) {
+ xa_for_each(&port->dports, index, dport) {
devm_release_action(&port->dev, cxl_dport_unlink, dport);
devm_release_action(&port->dev, cxl_dport_remove, dport);
devm_kfree(&port->dev, dport);
}
- devm_release_action(port->dev.parent, cxl_unlink_uport, port);
- devm_release_action(port->dev.parent, unregister_port, port);
}
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport)
+{
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_port *endpoint, *iter, *down;
+ int rc;
+
+ /*
+	 * Now that the path to the root is established, record all the
+ * intervening ports in the chain.
+ */
+ for (iter = parent_port, down = NULL; !is_cxl_root(iter);
+ down = iter, iter = to_cxl_port(iter->dev.parent)) {
+ struct cxl_ep *ep;
+
+ ep = cxl_ep_load(iter, cxlmd);
+ ep->next = down;
+ }
+
+ endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
+ cxlds->component_reg_phys, parent_dport);
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
+
+ dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+
+ rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+ if (rc)
+ return rc;
+
+ if (!endpoint->dev.driver) {
+ dev_err(&cxlmd->dev, "%s failed probe\n",
+ dev_name(&endpoint->dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
+
static void cxl_detach_ep(void *data)
{
struct cxl_memdev *cxlmd = data;
@@ -906,13 +1164,13 @@ static void cxl_detach_ep(void *data)
for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct cxl_port *port, *parent_port;
- LIST_HEAD(reap_dports);
struct cxl_ep *ep;
+ bool died = false;
if (!dport_dev)
break;
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, NULL);
if (!port)
continue;
@@ -936,26 +1194,27 @@ static void cxl_detach_ep(void *data)
}
device_lock(&port->dev);
- ep = find_ep(port, &cxlmd->dev);
+ ep = cxl_ep_load(port, cxlmd);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
- cxl_ep_release(ep);
- if (ep && !port->dead && list_empty(&port->endpoints) &&
+ cxl_ep_remove(port, ep);
+ if (ep && !port->dead && xa_empty(&port->endpoints) &&
!is_cxl_root(parent_port)) {
/*
* This was the last ep attached to a dynamically
* enumerated port. Block new cxl_add_ep() and garbage
* collect the port.
*/
+ died = true;
port->dead = true;
- list_splice_init(&port->dports, &reap_dports);
+ reap_dports(port);
}
device_unlock(&port->dev);
- if (!list_empty(&reap_dports)) {
+ if (died) {
dev_dbg(&cxlmd->dev, "delete %s\n",
dev_name(&port->dev));
- delete_switch_port(port, &reap_dports);
+ delete_switch_port(port);
}
put_device(&port->dev);
device_unlock(&parent_port->dev);
@@ -986,6 +1245,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
{
struct device *dparent = grandparent(dport_dev);
struct cxl_port *port, *parent_port = NULL;
+ struct cxl_dport *dport, *parent_dport;
resource_size_t component_reg_phys;
int rc;
@@ -1000,7 +1260,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -ENXIO;
}
- parent_port = find_cxl_port(dparent);
+ parent_port = find_cxl_port(dparent, &parent_dport);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
@@ -1015,13 +1275,14 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
goto out;
}
- port = find_cxl_port_at(parent_port, dport_dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
if (!port) {
component_reg_phys = find_component_registers(uport_dev);
port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_port);
+ component_reg_phys, parent_dport);
+ /* retry find to pick up the new dport information */
if (!IS_ERR(port))
- get_device(&port->dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
}
out:
device_unlock(&parent_port->dev);
@@ -1031,8 +1292,8 @@ out:
else {
dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
- if (rc == -EEXIST) {
+ rc = cxl_add_ep(dport, &cxlmd->dev);
+ if (rc == -EBUSY) {
/*
* "can't" happen, but this error code means
* something to the caller, so translate it.
@@ -1065,6 +1326,7 @@ retry:
for (iter = dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct device *uport_dev;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!dport_dev)
@@ -1080,12 +1342,12 @@ retry:
dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, &dport);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
+ rc = cxl_add_ep(dport, &cxlmd->dev);
/*
* If the endpoint already exists in the port's list,
@@ -1094,7 +1356,7 @@ retry:
* the parent_port lock as the current port may be being
* reaped.
*/
- if (rc && rc != -EEXIST) {
+ if (rc && rc != -EBUSY) {
put_device(&port->dev);
return rc;
}
@@ -1124,30 +1386,14 @@ retry:
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport)
{
- return find_cxl_port(grandparent(&cxlmd->dev));
+ return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev)
-{
- struct cxl_dport *dport;
-
- device_lock(&port->dev);
- list_for_each_entry(dport, &port->dports, list)
- if (dport->dport == dev) {
- device_unlock(&port->dev);
- return dport;
- }
-
- device_unlock(&port->dev);
- return NULL;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
-
-static int decoder_populate_targets(struct cxl_decoder *cxld,
+static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
{
int i, rc = 0;
@@ -1157,88 +1403,92 @@ static int decoder_populate_targets(struct cxl_decoder *cxld,
device_lock_assert(&port->dev);
- if (list_empty(&port->dports))
+ if (xa_empty(&port->dports))
return -EINVAL;
- write_seqlock(&cxld->target_lock);
- for (i = 0; i < cxld->nr_targets; i++) {
+ write_seqlock(&cxlsd->target_lock);
+ for (i = 0; i < cxlsd->nr_targets; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
if (!dport) {
rc = -ENXIO;
break;
}
- cxld->target[i] = dport;
+ cxlsd->target[i] = dport;
}
- write_sequnlock(&cxld->target_lock);
+ write_sequnlock(&cxlsd->target_lock);
return rc;
}
+static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
+{
+ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
+ struct cxl_decoder *cxld = &cxlsd->cxld;
+ int iw;
+
+ iw = cxld->interleave_ways;
+ if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
+ "misconfigured root decoder\n"))
+ return NULL;
+
+ return cxlrd->cxlsd.target[pos % iw];
+}
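
[Editor's example] cxl_hb_modulo() is the root decoder's target-selection callback: region position pos maps to host-bridge target pos % interleave_ways. A tiny standalone sketch of that mapping, with a sample interleave-ways value that is an assumption, not from the patch:

/*
 * Hedged sketch of cxl_hb_modulo(): with a 2-way root decoder, even
 * region positions select target[0] and odd positions target[1].
 */
#include <stdio.h>

int main(void)
{
	int iw = 2;	/* assumed root decoder interleave_ways */
	int pos;

	for (pos = 0; pos < 8; pos++)
		printf("region pos %d -> root target[%d]\n", pos, pos % iw);
	return 0;
}
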
+
static struct lock_class_key cxl_decoder_key;
/**
- * cxl_decoder_alloc - Allocate a new CXL decoder
+ * cxl_decoder_init - Common decoder setup / initialization
* @port: owning port of this decoder
- * @nr_targets: downstream targets accessible by this decoder. All upstream
- * ports and root ports must have at least 1 target. Endpoint
- * devices will have 0 targets. Callers wishing to register an
- * endpoint device should specify 0.
- *
- * A port should contain one or more decoders. Each of those decoders enable
- * some address space for CXL.mem utilization. A decoder is expected to be
- * configured by the caller before registering.
+ * @cxld: common decoder properties to initialize
*
- * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
- * is initialized to be a "passthrough" decoder.
+ * A port may contain one or more decoders. Each of those decoders
+ * enables some address space for CXL.mem utilization. A decoder is
+ * expected to be configured by the caller before registering via
+ * cxl_decoder_add().
*/
-static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
struct device *dev;
- int rc = 0;
-
- if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
- return ERR_PTR(-EINVAL);
-
- cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
- if (!cxld)
- return ERR_PTR(-ENOMEM);
+ int rc;
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
- goto err;
+ return rc;
/* need parent to stick around to release the id */
get_device(&port->dev);
cxld->id = rc;
- cxld->nr_targets = nr_targets;
- seqlock_init(&cxld->target_lock);
dev = &cxld->dev;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_decoder_key);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
- if (is_cxl_root(port))
- cxld->dev.type = &cxl_decoder_root_type;
- else if (is_cxl_endpoint(port))
- cxld->dev.type = &cxl_decoder_endpoint_type;
- else
- cxld->dev.type = &cxl_decoder_switch_type;
/* Pre initialize an "empty" decoder */
cxld->interleave_ways = 1;
cxld->interleave_granularity = PAGE_SIZE;
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
- return cxld;
-err:
- kfree(cxld);
- return ERR_PTR(rc);
+ return 0;
+}
+
+static int cxl_switch_decoder_init(struct cxl_port *port,
+ struct cxl_switch_decoder *cxlsd,
+ int nr_targets)
+{
+ if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
+ return -EINVAL;
+
+ cxlsd->nr_targets = nr_targets;
+ seqlock_init(&cxlsd->target_lock);
+ return cxl_decoder_init(port, &cxlsd->cxld);
}
/**
@@ -1251,13 +1501,46 @@ err:
* firmware description of CXL resources into a CXL standard decode
* topology.
*/
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_root_decoder *cxlrd;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_root(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
+ GFP_KERNEL);
+ if (!cxlrd)
+ return ERR_PTR(-ENOMEM);
+
+ cxlsd = &cxlrd->cxlsd;
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlrd);
+ return ERR_PTR(rc);
+ }
+
+ cxlrd->calc_hb = cxl_hb_modulo;
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_root_type;
+ /*
+ * cxl_root_decoder_release() special cases negative ids to
+ * detect memregion_alloc() failures.
+ */
+ atomic_set(&cxlrd->region_id, -1);
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0) {
+ put_device(&cxld->dev);
+ return ERR_PTR(rc);
+ }
+
+ atomic_set(&cxlrd->region_id, rc);
+ return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -1272,13 +1555,29 @@ EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
* that sit between Switch Upstream Ports / Switch Downstream Ports and
* Host Bridges / Root Ports.
*/
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (is_cxl_root(port) || is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
+ if (!cxlsd)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlsd);
+ return ERR_PTR(rc);
+ }
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_switch_type;
+ return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
@@ -1288,18 +1587,35 @@ EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
*
* Return: A new cxl decoder to be registered by cxl_decoder_add()
*/
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, 0);
+ cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
+ if (!cxled)
+ return ERR_PTR(-ENOMEM);
+
+ cxled->pos = -1;
+ cxld = &cxled->cxld;
+ rc = cxl_decoder_init(port, cxld);
+ if (rc) {
+ kfree(cxled);
+ return ERR_PTR(rc);
+ }
+
+ cxld->dev.type = &cxl_decoder_endpoint_type;
+ return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
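
[Editor's example] All three allocators above share the contract spelled out in the cxl_decoder_init() kernel-doc: allocate, configure, then register. A hedged kernel-context sketch of an endpoint-decoder caller; the function name and HPA window are invented for illustration, and the real driver of this flow in the patch set is devm_cxl_enumerate_decoders() in core/hdm.c:

/*
 * Hedged sketch, kernel context assumed (linux/err.h plus this file's
 * cxl.h types): allocate an endpoint decoder, configure it, register it.
 */
static int example_add_endpoint_decoder(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	cxled = cxl_endpoint_decoder_alloc(port);
	if (IS_ERR(cxled))
		return PTR_ERR(cxled);

	cxld = &cxled->cxld;
	/* configure before registering, per the kernel-doc above */
	cxld->hpa_range = (struct range) {
		.start = 0x1000000000ULL,	/* assumed window */
		.end = 0x10ffffffffULL,		/* assumed window */
	};

	/* endpoint decoders carry no target list, so no target_map */
	rc = cxl_decoder_add(cxld, NULL);
	if (rc)
		put_device(&cxld->dev);
	return rc;
}

On success, cxl_decoder_autoremove() can then tie unregistration (the cxld_unregister() action later in this file) to the host's devm teardown.
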
/**
* cxl_decoder_add_locked - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1335,7 +1651,9 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
if (!is_endpoint_decoder(dev)) {
- rc = decoder_populate_targets(cxld, port, target_map);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ rc = decoder_populate_targets(cxlsd, port, target_map);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1347,20 +1665,13 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (rc)
return rc;
- /*
- * Platform decoder resources should show up with a reasonable name. All
- * other resources are just sub ranges within the main decoder resource.
- */
- if (is_root_decoder(dev))
- cxld->platform_res.name = dev_name(dev);
-
return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
/**
* cxl_decoder_add - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1394,6 +1705,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
+ struct cxl_endpoint_decoder *cxled;
+
+ if (is_endpoint_decoder(dev)) {
+ cxled = to_cxl_endpoint_decoder(dev);
+ cxl_decoder_kill_region(cxled);
+ }
+
device_unregister(dev);
}
@@ -1521,10 +1839,20 @@ struct bus_type cxl_bus_type = {
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
+static struct dentry *cxl_debugfs;
+
+struct dentry *cxl_debugfs_create_dir(const char *dir)
+{
+ return debugfs_create_dir(dir, cxl_debugfs);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
+
static __init int cxl_core_init(void)
{
int rc;
+ cxl_debugfs = debugfs_create_dir("cxl", NULL);
+
cxl_mbox_init();
rc = cxl_memdev_init();
@@ -1541,22 +1869,28 @@ static __init int cxl_core_init(void)
if (rc)
goto err_bus;
+ rc = cxl_region_init();
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ bus_unregister(&cxl_bus_type);
err_bus:
destroy_workqueue(cxl_bus_wq);
err_wq:
cxl_memdev_exit();
- cxl_mbox_exit();
return rc;
}
static void cxl_core_exit(void)
{
+ cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
cxl_memdev_exit();
- cxl_mbox_exit();
+ debugfs_remove_recursive(cxl_debugfs);
}
module_init(cxl_core_init);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
new file mode 100644
index 000000000000..401148016978
--- /dev/null
+++ b/drivers/cxl/core/region.c
@@ -0,0 +1,1896 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/memregion.h>
+#include <linux/genalloc.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/idr.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl core region
+ *
+ * CXL Regions represent mapped memory capacity in system physical address
+ * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
+ * Memory ranges, Regions represent the capacity actively mapped by the HDM
+ * Decoder Capability structures throughout the Host Bridges, Switches, and
+ * Endpoints in the topology.
+ *
+ * Region configuration has ordering constraints. UUID may be set at any time
+ * before the region is active, but is only visible for persistent regions.
+ * 1. Interleave granularity
+ * 2. Interleave ways
+ * 3. Region size (HPA allocation)
+ * 4. Decoder targets
+ */
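
[Editor's example] These ordering constraints map one-to-one onto the sysfs attributes defined below. A hedged userspace sketch of configuring and committing a pmem region in the required order; region0, decoder2.0, and all values are hypothetical, and the targetN attributes belong to the target group this file creates:

/*
 * Hedged sketch: drive the ordering constraints through sysfs.
 */
#include <stdio.h>

static int wr(const char *base, const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	const char *r = "/sys/bus/cxl/devices/region0"; /* hypothetical */

	if (wr(r, "interleave_granularity", "256") ||	/* step 1 */
	    wr(r, "interleave_ways", "1") ||		/* step 2 */
	    /* uuid: any time before size, pmem regions only */
	    wr(r, "uuid", "f1f2f3f4-0000-0000-0000-000000000001") ||
	    wr(r, "size", "0x10000000") ||		/* step 3: HPA alloc */
	    wr(r, "target0", "decoder2.0") ||		/* step 4 */
	    wr(r, "commit", "1"))			/* program HDM decoders */
		return 1;
	return 0;
}

alloc_hpa(), later in this file, is what enforces that the uuid lands before size for pmem regions.
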
+
+/*
+ * All changes to the interleave configuration occur with this lock held
+ * for write.
+ */
+static DECLARE_RWSEM(cxl_region_rwsem);
+
+static struct cxl_region *to_cxl_region(struct device *dev);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int is_dup(struct device *match, void *data)
+{
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ uuid_t *uuid = data;
+
+ if (!is_cxl_region(match))
+ return 0;
+
+ lockdep_assert_held(&cxl_region_rwsem);
+ cxlr = to_cxl_region(match);
+ p = &cxlr->params;
+
+ if (uuid_equal(&p->uuid, uuid)) {
+ dev_dbg(match, "already has uuid: %pUb\n", uuid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ uuid_t temp;
+ ssize_t rc;
+
+ if (len != UUID_STRING_LEN + 1)
+ return -EINVAL;
+
+ rc = uuid_parse(buf, &temp);
+ if (rc)
+ return rc;
+
+ if (uuid_is_null(&temp))
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (uuid_equal(&p->uuid, &temp))
+ goto out;
+
+ rc = -EBUSY;
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ goto out;
+
+ rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
+ if (rc < 0)
+ goto out;
+
+ uuid_copy(&p->uuid, &temp);
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(uuid);
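
[Editor's example] One easy-to-miss detail in uuid_store() above: len must be exactly UUID_STRING_LEN + 1, i.e. the 36-character UUID text plus one trailing byte (conventionally '\n'), so a write without the newline fails with -EINVAL. A minimal sketch with a hypothetical path:

/*
 * Hedged sketch: the uuid store wants exactly 37 bytes,
 * 36 UUID characters plus a trailing newline.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/cxl/devices/region0/uuid", "w");

	if (!f)
		return 1;
	/* 36 chars + '\n' => len == UUID_STRING_LEN + 1 */
	fprintf(f, "%s\n", "f1f2f3f4-0000-0000-0000-000000000001");
	return fclose(f) ? 1 : 0;
}
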
+
+static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ return xa_load(&port->regions, (unsigned long)cxlr);
+}
+
+static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i;
+
+ for (i = count - 1; i >= 0; i--) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_ep *ep;
+ int rc;
+
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->reset(cxld);
+ if (rc)
+ return rc;
+ }
+
+ rc = cxled->cxld.reset(&cxled->cxld);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cxl_region_decode_commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+
+ /* commit bottom up */
+ for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->commit(cxld);
+ if (rc)
+ break;
+ }
+
+ if (rc) {
+ /* programming @iter failed, teardown */
+ for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ cxld->reset(cxld);
+ }
+
+ cxled->cxld.reset(&cxled->cxld);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ /* undo the targets that were successfully committed */
+ cxl_region_decode_reset(cxlr, i);
+ return rc;
+}
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ /* Already in the requested state? */
+ if (commit && p->state >= CXL_CONFIG_COMMIT)
+ goto out;
+ if (!commit && p->state < CXL_CONFIG_COMMIT)
+ goto out;
+
+ /* Not ready to commit? */
+ if (commit && p->state < CXL_CONFIG_ACTIVE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (commit)
+ rc = cxl_region_decode_commit(cxlr);
+ else {
+ p->state = CXL_CONFIG_RESET_PENDING;
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+
+ /*
+	 * The lock was dropped, so we need to revalidate that the reset is
+ * still pending.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING)
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ }
+
+ if (rc)
+ goto out;
+
+ if (commit)
+ p->state = CXL_CONFIG_COMMIT;
+ else if (p->state == CXL_CONFIG_RESET_PENDING)
+ p->state = CXL_CONFIG_ACTIVE;
+
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+
+static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(commit);
+
+static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ return 0;
+ return a->mode;
+}
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static const struct attribute_group *get_cxl_region_target_group(void);
+
+static ssize_t interleave_ways_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ unsigned int val, save;
+ int rc;
+ u8 iw;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = ways_to_cxl(val, &iw);
+ if (rc)
+ return rc;
+
+ /*
+	 * Even for x3, x6, and x12 interleaves the region interleave must be a
+ * power of 2 multiple of the host bridge interleave.
+ */
+ if (!is_power_of_2(val / cxld->interleave_ways) ||
+ (val % cxld->interleave_ways)) {
+ dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ save = p->interleave_ways;
+ p->interleave_ways = val;
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
+ if (rc)
+ p->interleave_ways = save;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_ways);
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static ssize_t interleave_granularity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc, val;
+ u16 ig;
+
+ rc = kstrtoint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = granularity_to_cxl(val, &ig);
+ if (rc)
+ return rc;
+
+ /*
+ * When the host-bridge is interleaved, disallow region granularity !=
+ * root granularity. Regions with a granularity less than the root
+ * interleave result in needing multiple endpoints to support a single
+	 * slot in the interleave (possible to support in the future). Regions
+ * with a granularity greater than the root interleave result in invalid
+ * DPA translations (invalid to support).
+ */
+ if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ p->interleave_granularity = val;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_granularity);
+
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 resource = -1ULL;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ resource = p->res->start;
+ rc = sysfs_emit(buf, "%#llx\n", resource);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct resource *res;
+ u32 remainder = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ /* Nothing to do... */
+ if (p->res && resource_size(p->res) == size)
+ return 0;
+
+ /* To change size the old size must be freed first */
+ if (p->res)
+ return -EBUSY;
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
+
+ /* ways, granularity and uuid (if PMEM) need to be set before HPA */
+ if (!p->interleave_ways || !p->interleave_granularity ||
+ (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ return -ENXIO;
+
+ div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+ if (remainder)
+ return -EINVAL;
+
+ res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
+ dev_name(&cxlr->dev));
+ if (IS_ERR(res)) {
+ dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
+ PTR_ERR(res));
+ return PTR_ERR(res);
+ }
+
+ p->res = res;
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+
+ return 0;
+}
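
[Editor's example] The div_u64_rem() check above means the region size must be a whole multiple of SZ_256M times the interleave ways, so for instance a 4-way region can only be sized in 1 GiB increments. The same arithmetic as a standalone sketch, with sample sizes and ways:

/*
 * Hedged sketch of alloc_hpa()'s alignment rule: size must be a
 * multiple of SZ_256M * interleave_ways.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_256M (256ULL << 20)

static int hpa_size_valid(uint64_t size, unsigned int ways)
{
	return size % (SZ_256M * ways) == 0;
}

int main(void)
{
	/* 4-way: 1GiB steps, so 1GiB passes and 512MiB fails */
	printf("1GiB@x4: %d, 512MiB@x4: %d\n",
	       hpa_size_valid(1ULL << 30, 4),
	       hpa_size_valid(512ULL << 20, 4));
	return 0;
}
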
+
+static void cxl_region_iomem_release(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (device_is_registered(&cxlr->dev))
+ lockdep_assert_held_write(&cxl_region_rwsem);
+ if (p->res) {
+ remove_resource(p->res);
+ kfree(p->res);
+ p->res = NULL;
+ }
+}
+
+static int free_hpa(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!p->res)
+ return 0;
+
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ return -EBUSY;
+
+ cxl_region_iomem_release(cxlr);
+ p->state = CXL_CONFIG_IDLE;
+ return 0;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ u64 val;
+ int rc;
+
+ rc = kstrtou64(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (val)
+ rc = alloc_hpa(cxlr, val);
+ else
+ rc = free_hpa(cxlr);
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 size = 0;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ size = resource_size(p->res);
+ rc = sysfs_emit(buf, "%#llx\n", size);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(size);
+
+static struct attribute *cxl_region_attrs[] = {
+ &dev_attr_uuid.attr,
+ &dev_attr_commit.attr,
+ &dev_attr_interleave_ways.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group cxl_region_group = {
+ .attrs = cxl_region_attrs,
+ .is_visible = cxl_region_visible,
+};
+
+static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled = p->targets[pos];
+ if (!cxled)
+ rc = sysfs_emit(buf, "\n");
+ else
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
+out:
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int match_free_decoder(struct device *dev, void *data)
+{
+ struct cxl_decoder *cxld;
+ int *id = data;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+
+ /* enforce ordered allocation */
+ if (cxld->id != *id)
+ return 0;
+
+ if (!cxld->region)
+ return 1;
+
+ (*id)++;
+
+ return 0;
+}
+
+static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct device *dev;
+ int id = 0;
+
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (!dev)
+ return NULL;
+ /*
+	 * This decoder stays pinned (registered) as long as the endpoint
+	 * decoder is registered, and endpoint decoder unregistration holds
+	 * the cxl_region_rwsem over unregister events, so there is no need
+	 * to hold on to this extra reference.
+ */
+ put_device(dev);
+ return to_cxl_decoder(dev);
+}
+
+static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_region_ref *cxl_rr, *iter;
+ unsigned long index;
+ int rc;
+
+ xa_for_each(&port->regions, index, iter) {
+ struct cxl_region_params *ip = &iter->region->params;
+
+ if (ip->res->start > p->res->start) {
+ dev_dbg(&cxlr->dev,
+ "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
+ cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
+ if (!cxl_rr)
+ return ERR_PTR(-ENOMEM);
+ cxl_rr->port = port;
+ cxl_rr->region = cxlr;
+ cxl_rr->nr_targets = 1;
+ xa_init(&cxl_rr->endpoints);
+
+ rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track region reference: %d\n",
+ dev_name(&port->dev), rc);
+ kfree(cxl_rr);
+ return ERR_PTR(rc);
+ }
+
+ return cxl_rr;
+}
+
+static void free_region_ref(struct cxl_region_ref *cxl_rr)
+{
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+
+ dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
+ if (cxld->region == cxlr) {
+ cxld->region = NULL;
+ put_device(&cxlr->dev);
+ }
+
+ xa_erase(&port->regions, (unsigned long)cxlr);
+ xa_destroy(&cxl_rr->endpoints);
+ kfree(cxl_rr);
+}
+
+static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ int rc;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
+
+ if (ep) {
+ rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+ }
+ cxl_rr->nr_eps++;
+
+ if (!cxld->region) {
+ cxld->region = cxlr;
+ get_device(&cxlr->dev);
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_port_attach_region() - track a region's interest in a port by endpoint
+ * @port: port to add a new region reference 'struct cxl_region_ref'
+ * @cxlr: region to attach to @port
+ * @cxled: endpoint decoder used to create or further pin a region reference
+ * @pos: interleave position of @cxled in @cxlr
+ *
+ * The attach event is an opportunity to validate CXL decode setup
+ * constraints and record metadata needed for programming HDM decoders,
+ * in particular decoder target lists.
+ *
+ * The steps are:
+ *
+ * - validate that there are no other regions with a higher HPA already
+ * associated with @port
+ * - establish a region reference if one is not already present
+ *
+ * - additionally allocate a decoder instance that will host @cxlr on
+ * @port
+ *
+ * - pin the region reference by the endpoint
+ * - account for how many entries in @port's target list are needed to
+ * cover all of the added endpoints.
+ */
+static int cxl_port_attach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_ref *cxl_rr;
+ bool nr_targets_inc = false;
+ struct cxl_decoder *cxld;
+ unsigned long index;
+ int rc = -EBUSY;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (cxl_rr) {
+ struct cxl_ep *ep_iter;
+ int found = 0;
+
+ /*
+ * Walk the existing endpoints that have been attached to
+ * @cxlr at @port and see if they share the same 'next' port
+		 * in the downstream direction, i.e. endpoints that share a
+		 * common upstream switch.
+ */
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter == ep)
+ continue;
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+
+ /*
+ * New target port, or @port is an endpoint port that always
+ * accounts its own local decode as a target.
+ */
+ if (!found || !ep->next) {
+ cxl_rr->nr_targets++;
+ nr_targets_inc = true;
+ }
+
+ /*
+ * The decoder for @cxlr was allocated when the region was first
+ * attached to @port.
+ */
+ cxld = cxl_rr->decoder;
+ } else {
+ cxl_rr = alloc_region_ref(port, cxlr);
+ if (IS_ERR(cxl_rr)) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to allocate region reference\n",
+ dev_name(&port->dev));
+ return PTR_ERR(cxl_rr);
+ }
+ nr_targets_inc = true;
+
+ if (port == cxled_to_port(cxled))
+ cxld = &cxled->cxld;
+ else
+ cxld = cxl_region_find_decoder(port, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ goto out_erase;
+ }
+
+ if (cxld->region) {
+ dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
+ dev_name(&port->dev), dev_name(&cxld->dev),
+ dev_name(&cxld->region->dev));
+ rc = -EBUSY;
+ goto out_erase;
+ }
+
+ cxl_rr->decoder = cxld;
+ }
+
+ rc = cxl_rr_ep_add(cxl_rr, cxled);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track endpoint %s:%s reference\n",
+ dev_name(&port->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxld->dev));
+ goto out_erase;
+ }
+
+ dev_dbg(&cxlr->dev,
+ "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxled->cxld.dev), pos,
+ ep ? ep->next ? dev_name(ep->next->uport) :
+ dev_name(&cxlmd->dev) :
+ "none",
+ cxl_rr->nr_eps, cxl_rr->nr_targets);
+
+ return 0;
+out_erase:
+ if (nr_targets_inc)
+ cxl_rr->nr_targets--;
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+ return rc;
+}
+
+static void cxl_port_detach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_ep *ep = NULL;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (!cxl_rr)
+ return;
+
+ /*
+ * Endpoint ports do not carry cxl_ep references, and they
+	 * never target more than one endpoint by definition.
+ */
+ if (cxl_rr->decoder == &cxled->cxld)
+ cxl_rr->nr_eps--;
+ else
+ ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
+ if (ep) {
+ struct cxl_ep *ep_iter;
+ unsigned long index;
+ int found = 0;
+
+ cxl_rr->nr_eps--;
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+ if (!found)
+ cxl_rr->nr_targets--;
+ }
+
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+}
+
+static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+ struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
+ int distance)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled_peer;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_memdev *cxlmd_peer;
+ struct cxl_ep *ep_peer;
+ int pos = cxled->pos;
+
+ /*
+	 * If this position wants to share a dport with the last endpoint mapped,
+	 * then that endpoint, at index 'position - distance', must also be
+ * mapped by this dport.
+ */
+ if (pos < distance) {
+ dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxled_peer = p->targets[pos - distance];
+ cxlmd_peer = cxled_to_memdev(cxled_peer);
+ ep_peer = cxl_ep_load(port, cxlmd_peer);
+ if (ep->dport != ep_peer->dport) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
+ dev_name(&cxlmd_peer->dev),
+ dev_name(&cxled_peer->cxld.dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
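
[Editor's example] The distance passed in here is the stride between interleave positions that must resolve to the same dport: with p->nr_targets positions fanned out across cxl_rr->nr_targets dports, position pos pairs with position pos - distance. A standalone sketch of the index math, with assumed counts:

/*
 * Hedged sketch of check_last_peer()'s stride math: 4 region positions
 * over 2 dports gives distance = 4 / 2 = 2, pairing positions {0,2}
 * and {1,3} on the same dport.
 */
#include <stdio.h>

int main(void)
{
	int nr_positions = 4, nr_dports = 2;
	int distance = nr_positions / nr_dports;
	int pos;

	for (pos = 0; pos < nr_positions; pos++) {
		if (pos < distance)
			printf("pos %d: first endpoint on its dport\n", pos);
		else
			printf("pos %d: must share a dport with pos %d\n",
			       pos, pos - distance);
	}
	return 0;
}
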
+
+static int cxl_port_setup_targets(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_switch_decoder *cxlsd;
+ u16 eig, peig;
+ u8 eiw, peiw;
+
+ /*
+ * While root level decoders support x3, x6, x12, switch level
+ * decoders only support powers of 2 up to x16.
+ */
+ if (!is_power_of_2(cxl_rr->nr_targets)) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets);
+ return -EINVAL;
+ }
+
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ if (cxl_rr->nr_targets_set) {
+ int i, distance;
+
+ distance = p->nr_targets / cxl_rr->nr_targets;
+ for (i = 0; i < cxl_rr->nr_targets_set; i++)
+ if (ep->dport == cxlsd->target[i]) {
+ rc = check_last_peer(cxled, ep, cxl_rr,
+ distance);
+ if (rc)
+ return rc;
+ goto out_target_set;
+ }
+ goto add_target;
+ }
+
+ if (is_cxl_root(parent_port)) {
+ parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+ * switch ports.
+ */
+ if (!is_power_of_2(parent_iw))
+ parent_iw /= 3;
+ } else {
+ struct cxl_region_ref *parent_rr;
+ struct cxl_decoder *parent_cxld;
+
+ parent_rr = cxl_rr_load(parent_port, cxlr);
+ parent_cxld = parent_rr->decoder;
+ parent_ig = parent_cxld->interleave_granularity;
+ parent_iw = parent_cxld->interleave_ways;
+ }
+
+ rc = granularity_to_cxl(parent_ig, &peig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_ig);
+ return rc;
+ }
+
+ rc = ways_to_cxl(parent_iw, &peiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_iw);
+ return rc;
+ }
+
+ iw = cxl_rr->nr_targets;
+ rc = ways_to_cxl(iw, &eiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev), iw);
+ return rc;
+ }
+
+ /*
+ * If @parent_port is masking address bits, pick the next unused address
+ * bit to route @port's targets.
+ */
+ if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
+ u32 address_bit = max(peig + peiw, eiw + peig);
+
+ eig = address_bit - eiw + 1;
+ } else {
+ eiw = peiw;
+ eig = peig;
+ }
+
+ rc = cxl_to_granularity(eig, &ig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ 256 << eig);
+ return rc;
+ }
+
+ cxld->interleave_ways = iw;
+ cxld->interleave_granularity = ig;
+ cxld->hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+ dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+ dev_name(&port->dev), iw, ig);
+add_target:
+ if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: targets full trying to add %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ inc = 1;
+out_target_set:
+ cxl_rr->nr_targets_set += inc;
+ dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+
+ return 0;
+}
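+
+/*
+ * Worked example of the math above (values illustrative): assume a root
+ * decoder interleaving x2 at 256B granularity (peiw = 1, peig = 0) over a
+ * switch with two targets of its own (iw = 2, eiw = 1). Then:
+ *
+ *	address_bit = max(peig + peiw, eiw + peig) = max(1, 1) = 1
+ *	eig = address_bit - eiw + 1 = 1
+ *
+ * i.e. the host bridge consumes HPA bit 8 (256B stripe) and the switch
+ * routes on the next free bit, bit 9, for a 512B (eig = 1) granularity.
+ */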
+
+static void cxl_port_reset_targets(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_decoder *cxld;
+
+ /*
+ * After the last endpoint has been detached, the entire cxl_rr may now
+ * be gone.
+ */
+ if (!cxl_rr)
+ return;
+ cxl_rr->nr_targets_set = 0;
+
+ cxld = cxl_rr->decoder;
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+}
+
+static void cxl_region_teardown_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
+ cxl_port_reset_targets(iter, cxlr);
+ }
+}
+
+static int cxl_region_setup_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i, rc;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ /*
+ * Descend the topology tree programming targets while
+ * looking for conflicts.
+ */
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ rc = cxl_port_setup_targets(iter, cxlr, cxled);
+ if (rc) {
+ cxl_region_teardown_targets(cxlr);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_region_attach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *ep_port, *root_port, *iter;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_dport *dport;
+ int i, rc = -ENXIO;
+
+ if (cxled->mode == CXL_DECODER_DEAD) {
+ dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
+ return -ENODEV;
+ }
+
+ /* region already full of members, or interleave config not yet established? */
+ if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "region already active\n");
+ return -EBUSY;
+ } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "interleave config missing\n");
+ return -ENXIO;
+ }
+
+ if (pos < 0 || pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ return -ENXIO;
+ }
+
+ if (p->targets[pos] == cxled)
+ return 0;
+
+ if (p->targets[pos]) {
+ struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
+ struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
+
+ dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
+ pos, dev_name(&cxlmd_target->dev),
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+
+ for (i = 0; i < p->interleave_ways; i++) {
+ struct cxl_endpoint_decoder *cxled_target;
+ struct cxl_memdev *cxlmd_target;
+
+ cxled_target = p->targets[i];
+ if (!cxled_target)
+ continue;
+
+ cxlmd_target = cxled_to_memdev(cxled_target);
+ if (cxlmd_target == cxlmd) {
+ dev_dbg(&cxlr->dev,
+ "%s already specified at position %d via: %s\n",
+ dev_name(&cxlmd->dev), pos,
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+ }
+
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+ if (!dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(cxlr->dev.parent));
+ return -ENXIO;
+ }
+
+ if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+ return -ENXIO;
+ }
+
+ if (cxled->cxld.target_type != cxlr->type) {
+ dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->cxld.target_type, cxlr->type);
+ return -ENXIO;
+ }
+
+ if (!cxled->dpa_res) {
+ dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
+ return -ENXIO;
+ }
+
+ if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ resource_size(p->res)) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ (u64)resource_size(cxled->dpa_res), p->interleave_ways,
+ (u64)resource_size(p->res));
+ return -EINVAL;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+ if (rc)
+ goto err;
+ }
+
+ p->targets[pos] = cxled;
+ cxled->pos = pos;
+ p->nr_targets++;
+
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+ goto err_decrement;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ cxled->cxld.interleave_ways = p->interleave_ways;
+ cxled->cxld.interleave_granularity = p->interleave_granularity;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+
+ return 0;
+
+err_decrement:
+ p->nr_targets--;
+err:
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+ return rc;
+}
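+
+/*
+ * Example of the size invariant enforced above (numbers illustrative): a
+ * 1GB region with interleave_ways == 4 requires each endpoint decoder to
+ * carry a 256MB DPA allocation, since
+ *
+ *	resource_size(cxled->dpa_res) * p->interleave_ways
+ *		== 256MB * 4 == 1GB == resource_size(p->res)
+ */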
+
+static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
+ struct cxl_region *cxlr = cxled->cxld.region;
+ struct cxl_region_params *p;
+ int rc = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!cxlr)
+ return 0;
+
+ p = &cxlr->params;
+ get_device(&cxlr->dev);
+
+ if (p->state > CXL_CONFIG_ACTIVE) {
+ /*
+ * TODO: tear down all impacted regions if a device is
+ * removed out of order
+ */
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ if (rc)
+ goto out;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+
+ if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
+ p->targets[cxled->pos] != cxled) {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->pos);
+ goto out;
+ }
+
+ if (p->state == CXL_CONFIG_ACTIVE) {
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+ cxl_region_teardown_targets(cxlr);
+ }
+ p->targets[cxled->pos] = NULL;
+ p->nr_targets--;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+
+ /* notify the region driver that one of its targets has departed */
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+out:
+ put_device(&cxlr->dev);
+ return rc;
+}
+
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+ down_write(&cxl_region_rwsem);
+ cxled->mode = CXL_DECODER_DEAD;
+ cxl_region_detach(cxled);
+ up_write(&cxl_region_rwsem);
+}
+
+static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
+{
+ struct device *dev;
+ int rc;
+
+ dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
+ if (!dev)
+ return -ENODEV;
+
+ if (!is_endpoint_decoder(dev)) {
+ put_device(dev);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ goto out;
+ down_read(&cxl_dpa_rwsem);
+ rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
+ up_read(&cxl_dpa_rwsem);
+ up_write(&cxl_region_rwsem);
+out:
+ put_device(dev);
+ return rc;
+}
+
+static int detach_target(struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!p->targets[pos]) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = cxl_region_detach(p->targets[pos]);
+out:
+ up_write(&cxl_region_rwsem);
+ return rc;
+}
+
+static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
+ size_t len)
+{
+ int rc;
+
+ if (sysfs_streq(buf, "\n"))
+ rc = detach_target(cxlr, pos);
+ else
+ rc = attach_target(cxlr, buf, pos);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
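+
+/*
+ * Example sysfs interaction (paths illustrative): writing an endpoint
+ * decoder name attaches it at that position, and writing a bare newline
+ * detaches it:
+ *
+ *	echo decoder3.0 > /sys/bus/cxl/devices/region0/target0
+ *	echo "" > /sys/bus/cxl/devices/region0/target0
+ */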
+
+#define TARGET_ATTR_RW(n) \
+static ssize_t target##n##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ return show_targetN(to_cxl_region(dev), buf, (n)); \
+} \
+static ssize_t target##n##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_targetN(to_cxl_region(dev), buf, (n), len); \
+} \
+static DEVICE_ATTR_RW(target##n)
+
+TARGET_ATTR_RW(0);
+TARGET_ATTR_RW(1);
+TARGET_ATTR_RW(2);
+TARGET_ATTR_RW(3);
+TARGET_ATTR_RW(4);
+TARGET_ATTR_RW(5);
+TARGET_ATTR_RW(6);
+TARGET_ATTR_RW(7);
+TARGET_ATTR_RW(8);
+TARGET_ATTR_RW(9);
+TARGET_ATTR_RW(10);
+TARGET_ATTR_RW(11);
+TARGET_ATTR_RW(12);
+TARGET_ATTR_RW(13);
+TARGET_ATTR_RW(14);
+TARGET_ATTR_RW(15);
+
+static struct attribute *target_attrs[] = {
+ &dev_attr_target0.attr,
+ &dev_attr_target1.attr,
+ &dev_attr_target2.attr,
+ &dev_attr_target3.attr,
+ &dev_attr_target4.attr,
+ &dev_attr_target5.attr,
+ &dev_attr_target6.attr,
+ &dev_attr_target7.attr,
+ &dev_attr_target8.attr,
+ &dev_attr_target9.attr,
+ &dev_attr_target10.attr,
+ &dev_attr_target11.attr,
+ &dev_attr_target12.attr,
+ &dev_attr_target13.attr,
+ &dev_attr_target14.attr,
+ &dev_attr_target15.attr,
+ NULL,
+};
+
+static umode_t cxl_region_target_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (n < p->interleave_ways)
+ return a->mode;
+ return 0;
+}
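+
+/*
+ * For example, a region configured with interleave_ways == 4 only exposes
+ * target0..target3; target4..target15 stay hidden until the interleave
+ * geometry is widened.
+ */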
+
+static const struct attribute_group cxl_region_target_group = {
+ .attrs = target_attrs,
+ .is_visible = cxl_region_target_visible,
+};
+
+static const struct attribute_group *get_cxl_region_target_group(void)
+{
+ return &cxl_region_target_group;
+}
+
+static const struct attribute_group *region_groups[] = {
+ &cxl_base_attribute_group,
+ &cxl_region_group,
+ &cxl_region_target_group,
+ NULL,
+};
+
+static void cxl_region_release(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ memregion_free(cxlr->id);
+ kfree(cxlr);
+}
+
+const struct device_type cxl_region_type = {
+ .name = "cxl_region",
+ .release = cxl_region_release,
+ .groups = region_groups
+};
+
+bool is_cxl_region(struct device *dev)
+{
+ return dev->type == &cxl_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
+
+static struct cxl_region *to_cxl_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
+ "not a cxl_region device\n"))
+ return NULL;
+
+ return container_of(dev, struct cxl_region, dev);
+}
+
+static void unregister_region(void *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ device_del(dev);
+ cxl_region_iomem_release(cxlr);
+ put_device(dev);
+}
+
+static struct lock_class_key cxl_region_key;
+
+static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
+{
+ struct cxl_region *cxlr;
+ struct device *dev;
+
+ cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
+ if (!cxlr) {
+ memregion_free(id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = &cxlr->dev;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_region_key);
+ dev->parent = &cxlrd->cxlsd.cxld.dev;
+ device_set_pm_not_required(dev);
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_region_type;
+ cxlr->id = id;
+
+ return cxlr;
+}
+
+/**
+ * devm_cxl_add_region - Adds a region to a decoder
+ * @cxlrd: root decoder
+ * @id: memregion id to create; released via memregion_free() on failure
+ * @mode: mode for the endpoint decoders of this region
+ * @type: select whether this is an expander or accelerator (type-2 or type-3)
+ *
+ * This is the second step of region initialization. Regions exist within an
+ * address space which is mapped by a @cxlrd.
+ *
+ * Return: a new region on success, else an ERR_PTR(). The region will be named
+ * "regionZ" where Z is the unique region number.
+ */
+static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
+ int id,
+ enum cxl_decoder_mode mode,
+ enum cxl_decoder_type type)
+{
+ struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+ struct cxl_region *cxlr;
+ struct device *dev;
+ int rc;
+
+ cxlr = cxl_region_alloc(cxlrd, id);
+ if (IS_ERR(cxlr))
+ return cxlr;
+ cxlr->mode = mode;
+ cxlr->type = type;
+
+ dev = &cxlr->dev;
+ rc = dev_set_name(dev, "region%d", id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_dbg(port->uport, "%s: created %s\n",
+ dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
+ return cxlr;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+
+static ssize_t create_pmem_region_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
+}
+
+static ssize_t create_pmem_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_region *cxlr;
+ int id, rc;
+
+ rc = sscanf(buf, "region%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+
+ if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
+ memregion_free(rc);
+ return -EBUSY;
+ }
+
+ cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
+ CXL_DECODER_EXPANDER);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ return len;
+}
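+
+/*
+ * Example provisioning flow (sysfs paths illustrative): the store only
+ * succeeds when the written id matches the advertised next id, so racing
+ * writers are serialized by the atomic_cmpxchg() above:
+ *
+ *	next=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
+ *	echo $next > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
+ */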
+DEVICE_ATTR_RW(create_pmem_region);
+
+static ssize_t region_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (cxld->region)
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ else
+ rc = sysfs_emit(buf, "\n");
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+DEVICE_ATTR_RO(region);
+
+static struct cxl_region *
+cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct device *region_dev;
+
+ region_dev = device_find_child_by_name(&cxld->dev, name);
+ if (!region_dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_cxl_region(region_dev);
+}
+
+static ssize_t delete_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_region *cxlr;
+
+ cxlr = cxl_find_region_by_name(cxlrd, buf);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ devm_release_action(port->uport, unregister_region, cxlr);
+ put_device(&cxlr->dev);
+
+ return len;
+}
+DEVICE_ATTR_WO(delete_region);
+
+static void cxl_pmem_region_release(struct device *dev)
+{
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ int i;
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
+
+ put_device(&cxlmd->dev);
+ }
+
+ kfree(cxlr_pmem);
+}
+
+static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_pmem_region_type = {
+ .name = "cxl_pmem_region",
+ .release = cxl_pmem_region_release,
+ .groups = cxl_pmem_region_attribute_groups,
+};
+
+bool is_cxl_pmem_region(struct device *dev)
+{
+ return dev->type == &cxl_pmem_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
+
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
+ "not a cxl_pmem_region device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_pmem_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+
+static struct lock_class_key cxl_pmem_region_key;
+
+static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int i;
+
+ down_read(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT) {
+ cxlr_pmem = ERR_PTR(-ENXIO);
+ goto out;
+ }
+
+ cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
+ GFP_KERNEL);
+ if (!cxlr_pmem) {
+ cxlr_pmem = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ cxlr_pmem->hpa_range.start = p->res->start;
+ cxlr_pmem->hpa_range.end = p->res->end;
+
+ /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ cxlr_pmem->nr_mappings = p->nr_targets;
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+
+ m->cxlmd = cxlmd;
+ get_device(&cxlmd->dev);
+ m->start = cxled->dpa_res->start;
+ m->size = resource_size(cxled->dpa_res);
+ m->position = i;
+ }
+
+ dev = &cxlr_pmem->dev;
+ cxlr_pmem->cxlr = cxlr;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlr->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_pmem_region_type;
+out:
+ up_read(&cxl_region_rwsem);
+
+ return cxlr_pmem;
+}
+
+static void cxlr_pmem_unregister(void *dev)
+{
+ device_unregister(dev);
+}
+
+/**
+ * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
+ * @cxlr: parent CXL region for this pmem region bridge device
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int rc;
+
+ cxlr_pmem = cxl_pmem_region_alloc(cxlr);
+ if (IS_ERR(cxlr_pmem))
+ return PTR_ERR(cxlr_pmem);
+
+ dev = &cxlr_pmem->dev;
+ rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "probe interrupted\n");
+ return rc;
+ }
+
+ if (p->state < CXL_CONFIG_COMMIT) {
+ dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
+ rc = -ENXIO;
+ }
+
+ /*
+ * From this point on any path that changes the region's state away from
+ * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
+ */
+ up_read(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ switch (cxlr->mode) {
+ case CXL_DECODER_PMEM:
+ return devm_cxl_add_pmem_region(cxlr);
+ default:
+ dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
+ cxlr->mode);
+ return -ENXIO;
+ }
+}
+
+static struct cxl_driver cxl_region_driver = {
+ .name = "cxl_region",
+ .probe = cxl_region_probe,
+ .id = CXL_DEVICE_REGION,
+};
+
+int cxl_region_init(void)
+{
+ return cxl_driver_register(&cxl_region_driver);
+}
+
+void cxl_region_exit(void)
+{
+ cxl_driver_unregister(&cxl_region_driver);
+}
+
+MODULE_IMPORT_NS(CXL);
+MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6799b27c7db2..f680450f0b16 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -7,6 +7,7 @@
#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/log2.h>
#include <linux/io.h>
/**
@@ -53,9 +54,12 @@
#define CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
+#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
+#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
+#define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i)
static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
@@ -64,6 +68,57 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
return val ? val * 2 : 1;
}
+/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
+static inline int cxl_to_granularity(u16 ig, unsigned int *val)
+{
+ if (ig > 6)
+ return -EINVAL;
+ *val = 256 << ig;
+ return 0;
+}
+
+/* Encoding defined in the CXL ECN "3, 6, 12 and 16-way memory Interleaving" */
+static inline int cxl_to_ways(u8 eniw, unsigned int *val)
+{
+ switch (eniw) {
+ case 0 ... 4:
+ *val = 1 << eniw;
+ break;
+ case 8 ... 10:
+ *val = 3 << (eniw - 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int granularity_to_cxl(int g, u16 *ig)
+{
+ if (g > SZ_16K || g < 256 || !is_power_of_2(g))
+ return -EINVAL;
+ *ig = ilog2(g) - 8;
+ return 0;
+}
+
+static inline int ways_to_cxl(unsigned int ways, u8 *iw)
+{
+ if (ways > 16)
+ return -EINVAL;
+ if (is_power_of_2(ways)) {
+ *iw = ilog2(ways);
+ return 0;
+ }
+ if (ways % 3)
+ return -EINVAL;
+ ways /= 3;
+ if (!is_power_of_2(ways))
+ return -EINVAL;
+ *iw = ilog2(ways) + 8;
+ return 0;
+}
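+
+/*
+ * Worked examples of the encodings above:
+ *
+ *	ways_to_cxl(6, &iw)           -> iw = 9   (6 = 3 << 1 -> 8 + ilog2(2))
+ *	cxl_to_ways(9, &val)          -> val = 6  (3 << (9 - 8))
+ *	granularity_to_cxl(1024, &ig) -> ig = 2   (ilog2(1024) - 8)
+ *	cxl_to_granularity(2, &val)   -> val = 1024 (256 << 2)
+ */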
+
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -193,31 +248,77 @@ enum cxl_decoder_type {
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
+#define CXL_DECODER_MIN_GRANULARITY 256
+
/**
- * struct cxl_decoder - CXL address range decode configuration
+ * struct cxl_decoder - Common CXL HDM Decoder Attributes
* @dev: this decoder's device
* @id: kernel device name id
- * @platform_res: address space resources considered by root decoder
- * @decoder_range: address space resources considered by midlevel decoder
+ * @hpa_range: Host physical address range mapped by this decoder
* @interleave_ways: number of cxl_dports in this decode
* @interleave_granularity: data stride per dport
* @target_type: accelerator vs expander (type2 vs type3) selector
+ * @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
- * @target_lock: coordinate coherent reads of the target list
- * @nr_targets: number of elements in @target
- * @target: active ordered target list in current decoder configuration
- */
+ * @commit: device/decoder-type specific callback to commit settings to hw
+ * @reset: device/decoder-type specific callback to reset hw settings
+ */
struct cxl_decoder {
struct device dev;
int id;
- union {
- struct resource platform_res;
- struct range decoder_range;
- };
+ struct range hpa_range;
int interleave_ways;
int interleave_granularity;
enum cxl_decoder_type target_type;
+ struct cxl_region *region;
unsigned long flags;
+ int (*commit)(struct cxl_decoder *cxld);
+ int (*reset)(struct cxl_decoder *cxld);
+};
+
+/*
+ * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
+ * while cxld_unregister() is running
+ */
+enum cxl_decoder_mode {
+ CXL_DECODER_NONE,
+ CXL_DECODER_RAM,
+ CXL_DECODER_PMEM,
+ CXL_DECODER_MIXED,
+ CXL_DECODER_DEAD,
+};
+
+/**
+ * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
+ * @cxld: base cxl_decoder object
+ * @dpa_res: actively claimed DPA span of this decoder
+ * @skip: offset into @dpa_res where @cxld.hpa_range maps
+ * @mode: which memory type / access-mode-partition this decoder targets
+ * @pos: interleave position in @cxld.region
+ */
+struct cxl_endpoint_decoder {
+ struct cxl_decoder cxld;
+ struct resource *dpa_res;
+ resource_size_t skip;
+ enum cxl_decoder_mode mode;
+ int pos;
+};
+
+/**
+ * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
+ * @cxld: base cxl_decoder object
+ * @target_lock: coordinate coherent reads of the target list
+ * @nr_targets: number of elements in @target
+ * @target: active ordered target list in current decoder configuration
+ *
+ * The 'switch' decoder type represents the decoder instances of cxl_ports that
+ * route from the root of a CXL memory decode topology to the endpoints. They
+ * come in two flavors: root-level decoders, statically defined by platform
+ * firmware, and mid-level decoders, where the interleave granularity,
+ * interleave width, and target list are mutable.
+ */
+struct cxl_switch_decoder {
+ struct cxl_decoder cxld;
seqlock_t target_lock;
int nr_targets;
struct cxl_dport *target[];
@@ -225,6 +326,76 @@ struct cxl_decoder {
/**
+ * struct cxl_root_decoder - Static platform CXL address decoder
+ * @res: host / parent resource for region allocations
+ * @region_id: region id for next region provisioning event
+ * @calc_hb: which host bridge covers the n'th position by granularity
+ * @cxlsd: base cxl switch decoder
+ */
+struct cxl_root_decoder {
+ struct resource *res;
+ atomic_t region_id;
+ struct cxl_dport *(*calc_hb)(struct cxl_root_decoder *cxlrd, int pos);
+ struct cxl_switch_decoder cxlsd;
+};
+
+/*
+ * enum cxl_config_state - State machine for region configuration
+ * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
+ * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
+ * changes to interleave_ways or interleave_granularity
+ * @CXL_CONFIG_ACTIVE: All targets have been added and the region is
+ * now active
+ * @CXL_CONFIG_RESET_PENDING: see commit_store()
+ * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware
+ */
+enum cxl_config_state {
+ CXL_CONFIG_IDLE,
+ CXL_CONFIG_INTERLEAVE_ACTIVE,
+ CXL_CONFIG_ACTIVE,
+ CXL_CONFIG_RESET_PENDING,
+ CXL_CONFIG_COMMIT,
+};
+
+/**
+ * struct cxl_region_params - region settings
+ * @state: allow the driver to lockdown further parameter changes
+ * @uuid: unique id for persistent regions
+ * @interleave_ways: number of endpoints in the region
+ * @interleave_granularity: capacity each endpoint contributes to a stripe
+ * @res: allocated iomem capacity for this region
+ * @targets: active ordered targets in current decoder configuration
+ * @nr_targets: number of targets
+ *
+ * State transitions are protected by the cxl_region_rwsem
+ */
+struct cxl_region_params {
+ enum cxl_config_state state;
+ uuid_t uuid;
+ int interleave_ways;
+ int interleave_granularity;
+ struct resource *res;
+ struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
+ int nr_targets;
+};
+
+/**
+ * struct cxl_region - CXL region
+ * @dev: This region's device
+ * @id: This region's id. Id is globally unique across all regions
+ * @mode: Endpoint decoder allocation / access mode
+ * @type: Endpoint decoder target type
+ * @params: active + config params for the region
+ */
+struct cxl_region {
+ struct device dev;
+ int id;
+ enum cxl_decoder_mode mode;
+ enum cxl_decoder_type type;
+ struct cxl_region_params params;
+};
+
+/**
* enum cxl_nvdimm_brige_state - state machine for managing bus rescans
* @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
* @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
@@ -251,7 +422,26 @@ struct cxl_nvdimm_bridge {
struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
- struct nvdimm *nvdimm;
+ struct cxl_nvdimm_bridge *bridge;
+ struct cxl_pmem_region *region;
+};
+
+struct cxl_pmem_region_mapping {
+ struct cxl_memdev *cxlmd;
+ struct cxl_nvdimm *cxl_nvd;
+ u64 start;
+ u64 size;
+ int position;
+};
+
+struct cxl_pmem_region {
+ struct device dev;
+ struct cxl_region *cxlr;
+ struct nd_region *nd_region;
+ struct cxl_nvdimm_bridge *bridge;
+ struct range hpa_range;
+ int nr_mappings;
+ struct cxl_pmem_region_mapping mapping[];
};
/**
@@ -260,50 +450,94 @@ struct cxl_nvdimm {
* decode hierarchy.
* @dev: this port's device
* @uport: PCI or platform device implementing the upstream port capability
+ * @host_bridge: Shortcut to the platform attach point for this port
* @id: id for port device-name
* @dports: cxl_dport instances referenced by decoders
* @endpoints: cxl_ep instances, endpoints that are a descendant of this port
+ * @regions: cxl_region_ref instances, regions mapped by this port
+ * @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
+ * @hdm_end: track last allocated HDM decoder instance for allocation ordering
+ * @commit_end: cursor to track highest committed decoder for commit ordering
* @component_reg_phys: component register capability base address (optional)
* @dead: last ep has been removed, force port re-creation
* @depth: How deep this port is relative to the root. depth 0 is the root.
+ * @cdat: Cached CDAT data
+ * @cdat_available: Should a CDAT attribute be available in sysfs
*/
struct cxl_port {
struct device dev;
struct device *uport;
+ struct device *host_bridge;
int id;
- struct list_head dports;
- struct list_head endpoints;
+ struct xarray dports;
+ struct xarray endpoints;
+ struct xarray regions;
+ struct cxl_dport *parent_dport;
struct ida decoder_ida;
+ int hdm_end;
+ int commit_end;
resource_size_t component_reg_phys;
bool dead;
unsigned int depth;
+ struct cxl_cdat {
+ void *table;
+ size_t length;
+ } cdat;
+ bool cdat_available;
};
+static inline struct cxl_dport *
+cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
+{
+ return xa_load(&port->dports, (unsigned long)dport_dev);
+}
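+
+/*
+ * The dports xarray is keyed by the dport's device pointer, so lookup is a
+ * single xa_load(). A sketch of the corresponding registration (context
+ * assumed, not shown in this hunk):
+ *
+ *	xa_insert(&port->dports, (unsigned long)dport_dev, dport, GFP_KERNEL);
+ */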
+
/**
* struct cxl_dport - CXL downstream port
* @dport: PCI bridge or firmware device representing the downstream link
* @port_id: unique hardware identifier for dport in decoder target list
* @component_reg_phys: downstream port component registers
* @port: reference to cxl_port that contains this downstream port
- * @list: node for a cxl_port's list of cxl_dport instances
*/
struct cxl_dport {
struct device *dport;
int port_id;
resource_size_t component_reg_phys;
struct cxl_port *port;
- struct list_head list;
};
/**
* struct cxl_ep - track an endpoint's interest in a port
* @ep: device that hosts a generic CXL endpoint (expander or accelerator)
- * @list: node on port->endpoints list
+ * @dport: which dport routes to this endpoint on @port
+ * @next: cxl switch port across the link attached to @dport; NULL if
+ * attached to an endpoint
*/
struct cxl_ep {
struct device *ep;
- struct list_head list;
+ struct cxl_dport *dport;
+ struct cxl_port *next;
+};
+
+/**
+ * struct cxl_region_ref - track a region's interest in a port
+ * @port: point in topology to install this reference
+ * @decoder: decoder assigned for @region in @port
+ * @region: region for this reference
+ * @endpoints: cxl_ep references for region members beneath @port
+ * @nr_targets_set: track how many targets have been programmed during setup
+ * @nr_eps: number of endpoints beneath @port
+ * @nr_targets: number of distinct targets needed to reach @nr_eps
+ */
+struct cxl_region_ref {
+ struct cxl_port *port;
+ struct cxl_decoder *decoder;
+ struct cxl_region *region;
+ struct xarray endpoints;
+ int nr_targets_set;
+ int nr_eps;
+ int nr_targets;
};
/*
@@ -325,29 +559,31 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port);
+ struct cxl_dport *parent_dport);
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct device *dev);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
int cxl_bus_rescan(void);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd);
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
struct device *dport, int port_id,
resource_size_t component_reg_phys);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev);
struct cxl_decoder *to_cxl_decoder(struct device *dev);
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
-bool is_cxl_decoder(struct device *dev);
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
@@ -357,6 +593,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+bool is_cxl_region(struct device *dev);
+
extern struct bus_type cxl_bus_type;
struct cxl_driver {
@@ -385,6 +623,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
#define CXL_DEVICE_PORT 3
#define CXL_DEVICE_ROOT 4
#define CXL_DEVICE_MEMORY_EXPANDER 5
+#define CXL_DEVICE_REGION 6
+#define CXL_DEVICE_PMEM_REGION 7
#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
@@ -396,7 +636,21 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+
+#ifdef CONFIG_CXL_REGION
+bool is_cxl_pmem_region(struct device *dev);
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
+#else
+static inline bool is_cxl_pmem_region(struct device *dev)
+{
+ return false;
+}
+static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ return NULL;
+}
+#endif
/*
* Unit test builds overrides this to __weak, find the 'strong' version
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 7df0b053373a..88e3a8e54b6a 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -50,6 +50,24 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
+static inline struct cxl_port *cxled_to_port(struct cxl_endpoint_decoder *cxled)
+{
+ return to_cxl_port(cxled->cxld.dev.parent);
+}
+
+static inline struct cxl_port *cxlrd_to_port(struct cxl_root_decoder *cxlrd)
+{
+ return to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+}
+
+static inline struct cxl_memdev *
+cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = to_cxl_port(cxled->cxld.dev.parent);
+
+ return to_cxl_memdev(port->uport);
+}
+
bool is_cxl_memdev(struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
@@ -178,8 +196,9 @@ struct cxl_endpoint_dvsec_info {
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
- * @pmem_range: Active Persistent memory capacity configuration
- * @ram_range: Active Volatile memory capacity configuration
+ * @dpa_res: Overall DPA resource tree for the device
+ * @pmem_res: Active Persistent memory capacity configuration
+ * @ram_res: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
@@ -191,6 +210,7 @@ struct cxl_endpoint_dvsec_info {
* @component_reg_phys: register base of component registers
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
+ * @doe_mbs: PCI DOE mailbox array
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
@@ -209,8 +229,9 @@ struct cxl_dev_state {
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
- struct range pmem_range;
- struct range ram_range;
+ struct resource dpa_res;
+ struct resource pmem_res;
+ struct resource ram_res;
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
@@ -224,6 +245,8 @@ struct cxl_dev_state {
resource_size_t component_reg_phys;
u64 serial;
+ struct xarray doe_mbs;
+
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
@@ -299,6 +322,13 @@ struct cxl_mbox_identify {
u8 qos_telemetry_caps;
} __packed;
+struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+} __packed;
+
struct cxl_mbox_get_lsa {
__le32 offset;
__le32 length;
@@ -370,4 +400,8 @@ struct cxl_hdm {
unsigned int interleave_mask;
struct cxl_port *port;
};
+
+struct seq_file;
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds);
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index fce1c11729c2..eec597dbe763 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -74,4 +74,5 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
+void read_cdat_data(struct cxl_port *port);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index a979d0b484d5..64ccf053d32c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -24,42 +25,32 @@
* in higher level operations.
*/
-static int create_endpoint(struct cxl_memdev *cxlmd,
- struct cxl_port *parent_port)
+static void enable_suspend(void *data)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_port *endpoint;
- int rc;
+ cxl_mem_active_dec();
+}
- endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
- cxlds->component_reg_phys, parent_port);
- if (IS_ERR(endpoint))
- return PTR_ERR(endpoint);
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
- dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+static int cxl_mem_dpa_show(struct seq_file *file, void *data)
+{
+ struct device *dev = file->private;
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- rc = cxl_endpoint_autoremove(cxlmd, endpoint);
- if (rc)
- return rc;
-
- if (!endpoint->dev.driver) {
- dev_err(&cxlmd->dev, "%s failed probe\n",
- dev_name(&endpoint->dev));
- return -ENXIO;
- }
+ cxl_dpa_debug(file, cxlmd->cxlds);
return 0;
}
-static void enable_suspend(void *data)
-{
- cxl_mem_active_dec();
-}
-
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_port *parent_port;
+ struct cxl_dport *dport;
+ struct dentry *dentry;
int rc;
/*
@@ -73,11 +64,17 @@ static int cxl_mem_probe(struct device *dev)
if (work_pending(&cxlmd->detach_work))
return -EBUSY;
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+
rc = devm_cxl_enumerate_ports(cxlmd);
if (rc)
return rc;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, &dport);
if (!parent_port) {
dev_err(dev, "CXL port topology not found\n");
return -ENXIO;
@@ -91,7 +88,7 @@ static int cxl_mem_probe(struct device *dev)
goto unlock;
}
- rc = create_endpoint(cxlmd, parent_port);
+ rc = devm_cxl_add_endpoint(cxlmd, dport);
unlock:
device_unlock(&parent_port->dev);
put_device(&parent_port->dev);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 5a0ae46d4989..faeb5d9d7a7a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
@@ -386,6 +387,47 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
+static void cxl_pci_destroy_doe(void *mbs)
+{
+ xa_destroy(mbs);
+}
+
+static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 off = 0;
+
+ xa_init(&cxlds->doe_mbs);
+ if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
+ dev_err(dev, "Failed to create XArray for DOE's\n");
+ return;
+ }
+
+ /*
+ * Mailbox creation is best effort. Higher layers must determine if
+ * the lack of a mailbox for their protocol is a device failure or not.
+ */
+ pci_doe_for_each_off(pdev, off) {
+ struct pci_doe_mb *doe_mb;
+
+ doe_mb = pcim_doe_create_mb(pdev, off);
+ if (IS_ERR(doe_mb)) {
+ dev_err(dev, "Failed to create MB object for MB @ %x\n",
+ off);
+ continue;
+ }
+
+ if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
+ dev_err(dev, "xa_insert failed to insert MB @ %x\n",
+ off);
+ continue;
+ }
+
+ dev_dbg(dev, "Created DOE mailbox @%x\n", off);
+ }
+}
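+
+/*
+ * Sketch of a consumer looking up one of these mailboxes by capability
+ * offset (protocol check illustrative):
+ *
+ *	struct pci_doe_mb *mb = xa_load(&cxlds->doe_mbs, off);
+ *
+ *	if (mb && pci_doe_supports_prot(mb, PCI_DVSEC_VENDOR_ID_CXL, prot))
+ *		...
+ */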
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -434,6 +476,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);
+ devm_cxl_pci_create_doe(cxlds);
+
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
@@ -454,7 +498,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0aaa70b4e0f7..7dc0a2fa1a6b 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -7,6 +7,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"
@@ -26,7 +27,23 @@ static void clear_exclusive(void *cxlds)
static void unregister_nvdimm(void *nvdimm)
{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
+ struct cxl_pmem_region *cxlr_pmem;
+
+ device_lock(&cxl_nvb->dev);
+ cxlr_pmem = cxl_nvd->region;
+ dev_set_drvdata(&cxl_nvd->dev, NULL);
+ cxl_nvd->region = NULL;
+ device_unlock(&cxl_nvb->dev);
+
+ if (cxlr_pmem) {
+ device_release_driver(&cxlr_pmem->dev);
+ put_device(&cxlr_pmem->dev);
+ }
+
nvdimm_delete(nvdimm);
+ cxl_nvd->bridge = NULL;
}
static int cxl_nvdimm_probe(struct device *dev)
@@ -39,7 +56,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct nvdimm *nvdimm;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
+ cxl_nvb = cxl_find_nvdimm_bridge(dev);
if (!cxl_nvb)
return -ENXIO;
@@ -66,6 +83,7 @@ static int cxl_nvdimm_probe(struct device *dev)
}
dev_set_drvdata(dev, nvdimm);
+ cxl_nvd->bridge = cxl_nvb;
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
device_unlock(&cxl_nvb->dev);
@@ -204,15 +222,38 @@ static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
return cxl_nvb->nvdimm_bus != NULL;
}
-static int cxl_nvdimm_release_driver(struct device *dev, void *data)
+static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
+ struct cxl_nvdimm *cxl_nvd;
+
if (!is_cxl_nvdimm(dev))
return 0;
+
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->bridge != cxl_nvb)
+ return 0;
+
device_release_driver(dev);
return 0;
}
-static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
+static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+
+ if (!is_cxl_pmem_region(dev))
+ return 0;
+
+ cxlr_pmem = to_cxl_pmem_region(dev);
+ if (cxlr_pmem->bridge != cxl_nvb)
+ return 0;
+
+ device_release_driver(dev);
+ return 0;
+}
+
+static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
+ struct nvdimm_bus *nvdimm_bus)
{
if (!nvdimm_bus)
return;
@@ -222,7 +263,10 @@ static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
* nvdimm_bus_unregister() rips the nvdimm objects out from
* underneath them.
*/
- bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_pmem_region_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_nvdimm_release_driver);
nvdimm_bus_unregister(nvdimm_bus);
}
@@ -260,7 +304,7 @@ static void cxl_nvb_update_state(struct work_struct *work)
dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
}
- offline_nvdimm_bus(victim_bus);
+ offline_nvdimm_bus(cxl_nvb, victim_bus);
put_device(&cxl_nvb->dev);
}
@@ -315,6 +359,203 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
+static int match_cxl_nvdimm(struct device *dev, void *data)
+{
+ return is_cxl_nvdimm(dev);
+}
+
+static void unregister_nvdimm_region(void *nd_region)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct cxl_pmem_region *cxlr_pmem;
+ int i;
+
+ cxlr_pmem = nd_region_provider_data(nd_region);
+ cxl_nvb = cxlr_pmem->bridge;
+ device_lock(&cxl_nvb->dev);
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;
+
+ if (cxl_nvd->region) {
+ put_device(&cxlr_pmem->dev);
+ cxl_nvd->region = NULL;
+ }
+ }
+ device_unlock(&cxl_nvb->dev);
+
+ nvdimm_region_delete(nd_region);
+}
+
+static void cxlr_pmem_remove_resource(void *res)
+{
+ remove_resource(res);
+}
+
+struct cxl_pmem_region_info {
+ u64 offset;
+ u64 serial;
+};
+
+static int cxl_pmem_region_probe(struct device *dev)
+{
+ struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ struct cxl_region *cxlr = cxlr_pmem->cxlr;
+ struct cxl_pmem_region_info *info = NULL;
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct nd_interleave_set *nd_set;
+ struct nd_region_desc ndr_desc;
+ struct cxl_nvdimm *cxl_nvd;
+ struct nvdimm *nvdimm;
+ struct resource *res;
+ int rc, i = 0;
+
+ cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
+ if (!cxl_nvb) {
+ dev_dbg(dev, "bridge not found\n");
+ return -ENXIO;
+ }
+ cxlr_pmem->bridge = cxl_nvb;
+
+ device_lock(&cxl_nvb->dev);
+ if (!cxl_nvb->nvdimm_bus) {
+ dev_dbg(dev, "nvdimm bus not found\n");
+ rc = -ENXIO;
+ goto err;
+ }
+
+ memset(&mappings, 0, sizeof(mappings));
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ res->name = "Persistent Memory";
+ res->start = cxlr_pmem->hpa_range.start;
+ res->end = cxlr_pmem->hpa_range.end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
+
+ rc = insert_resource(&iomem_resource, res);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
+ if (rc)
+ goto err;
+
+ ndr_desc.res = res;
+ ndr_desc.provider_data = cxlr_pmem;
+
+ ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
+ ndr_desc.target_node = phys_to_target_node(res->start);
+ if (ndr_desc.target_node == NUMA_NO_NODE) {
+ ndr_desc.target_node = ndr_desc.numa_node;
+ dev_dbg(&cxlr->dev, "changing target node from %d to %d",
+ NUMA_NO_NODE, ndr_desc.target_node);
+ }
+
+ nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ndr_desc.memregion = cxlr->id;
+ set_bit(ND_REGION_CXL, &ndr_desc.flags);
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
+
+ info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_memdev *cxlmd = m->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *d;
+
+ d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
+ if (!d) {
+ dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* safe to drop ref now with bridge lock held */
+ put_device(d);
+
+ cxl_nvd = to_cxl_nvdimm(d);
+ nvdimm = dev_get_drvdata(&cxl_nvd->dev);
+ if (!nvdimm) {
+ dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+ cxl_nvd->region = cxlr_pmem;
+ get_device(&cxlr_pmem->dev);
+ m->cxl_nvd = cxl_nvd;
+ mappings[i] = (struct nd_mapping_desc) {
+ .nvdimm = nvdimm,
+ .start = m->start,
+ .size = m->size,
+ .position = i,
+ };
+ info[i].offset = m->start;
+ info[i].serial = cxlds->serial;
+ }
+ ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
+ ndr_desc.mapping = mappings;
+
+ /*
+ * TODO: enable CXL labels, which remove the need for an 'interleave-set cookie'
+ */
+ nd_set->cookie1 =
+ nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
+ nd_set->cookie2 = nd_set->cookie1;
+ ndr_desc.nd_set = nd_set;
+
+ cxlr_pmem->nd_region =
+ nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
+ if (!cxlr_pmem->nd_region) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
+ cxlr_pmem->nd_region);
+out:
+ kfree(info);
+ device_unlock(&cxl_nvb->dev);
+ put_device(&cxl_nvb->dev);
+
+ return rc;
+
+err:
+ dev_dbg(dev, "failed to create nvdimm region\n");
+ for (i--; i >= 0; i--) {
+ nvdimm = mappings[i].nvdimm;
+ cxl_nvd = nvdimm_provider_data(nvdimm);
+ put_device(&cxl_nvd->region->dev);
+ cxl_nvd->region = NULL;
+ }
+ goto out;
+}
+
+static struct cxl_driver cxl_pmem_region_driver = {
+ .name = "cxl_pmem_region",
+ .probe = cxl_pmem_region_probe,
+ .id = CXL_DEVICE_PMEM_REGION,
+};
+
/*
* Return all bridges to the CXL_NVB_NEW state to invalidate any
* ->state_work referring to the now destroyed cxl_pmem_wq.
@@ -359,8 +600,14 @@ static __init int cxl_pmem_init(void)
if (rc)
goto err_nvdimm;
+ rc = cxl_driver_register(&cxl_pmem_region_driver);
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
@@ -370,6 +617,7 @@ err_bridge:
static __exit void cxl_pmem_exit(void)
{
+ cxl_driver_unregister(&cxl_pmem_region_driver);
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
destroy_cxl_pmem_wq();
@@ -381,3 +629,4 @@ module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
+MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 3cf308f114c4..5453771bf330 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -53,6 +53,9 @@ static int cxl_port_probe(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* Cache the data early to ensure is_visible() works */
+ read_cdat_data(port);
+
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
if (rc)
@@ -78,10 +81,60 @@ static int cxl_port_probe(struct device *dev)
return 0;
}
+static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if (!port->cdat_available)
+ return -ENXIO;
+
+ if (!port->cdat.table)
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &offset,
+ port->cdat.table,
+ port->cdat.length);
+}
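+
+/*
+ * Example (path illustrative): dump a port's cached CDAT table:
+ *
+ *	hexdump -C /sys/bus/cxl/devices/port1/CDAT
+ */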
+
+static BIN_ATTR_ADMIN_RO(CDAT, 0);
+
+static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if ((attr == &bin_attr_CDAT) && port->cdat_available)
+ return attr->attr.mode;
+
+ return 0;
+}
+
+static struct bin_attribute *cxl_cdat_bin_attributes[] = {
+ &bin_attr_CDAT,
+ NULL,
+};
+
+static struct attribute_group cxl_cdat_attribute_group = {
+ .bin_attrs = cxl_cdat_bin_attributes,
+ .is_bin_visible = cxl_port_bin_attr_is_visible,
+};
+
+static const struct attribute_group *cxl_port_attribute_groups[] = {
+ &cxl_cdat_attribute_group,
+ NULL,
+};
+
static struct cxl_driver cxl_port_driver = {
.name = "cxl_port",
.probe = cxl_port_probe,
.id = CXL_DEVICE_PORT,
+ .drv = {
+ .dev_groups = cxl_port_attribute_groups,
+ },
};
module_cxl_driver(cxl_port_driver);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 50a08b2ec247..9b5e2a5eb0ae 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -22,6 +22,8 @@
* @private: dax driver private data
* @flags: state and boolean properties
* @ops: operations for this device
+ * @holder_data: holder of a dax_device: could be a filesystem or a mapped device
+ * @holder_ops: operations for the inner holder
*/
struct dax_device {
struct inode inode;
@@ -29,6 +31,8 @@ struct dax_device {
void *private;
unsigned long flags;
const struct dax_operations *ops;
+ void *holder_data;
+ const struct dax_holder_operations *holder_ops;
};
static dev_t dax_devt;
@@ -71,8 +75,11 @@ EXPORT_SYMBOL_GPL(dax_remove_host);
* fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
* @bdev: block device to find a dax_device for
* @start_off: returns the byte offset into the dax_device that @bdev starts
+ * @holder: filesystem or mapped device inside the dax_device
+ * @ops: operations for the inner holder
*/
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops)
{
struct dax_device *dax_dev;
u64 part_size;
@@ -92,11 +99,26 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
dax_dev = NULL;
+ else if (holder) {
+ if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
+ dax_dev->holder_ops = ops;
+ else
+ dax_dev = NULL;
+ }
dax_read_unlock(id);
return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+
+void fs_put_dax(struct dax_device *dax_dev, void *holder)
+{
+ if (dax_dev && holder &&
+ cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
+ dax_dev->holder_ops = NULL;
+ put_dax(dax_dev);
+}
+EXPORT_SYMBOL_GPL(fs_put_dax);
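+
+/*
+ * Sketch of holder registration from a filesystem (names hypothetical):
+ *
+ *	static const struct dax_holder_operations my_holder_ops = {
+ *		.notify_failure = my_fs_notify_failure,
+ *	};
+ *
+ *	dax_dev = fs_dax_get_by_bdev(bdev, &start_off, sb, &my_holder_ops);
+ *	...
+ *	fs_put_dax(dax_dev, sb);
+ *
+ * Only one holder may register at a time; a second registration attempt
+ * makes fs_dax_get_by_bdev() return NULL.
+ */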
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
@@ -204,6 +226,29 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_recovery_write);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
+ u64 len, int mf_flags)
+{
+ int rc, id;
+
+ id = dax_read_lock();
+ if (!dax_alive(dax_dev)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!dax_dev->holder_ops) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
+out:
+ dax_read_unlock(id);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -277,8 +322,15 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev)
return;
+ if (dax_dev->holder_data != NULL)
+ dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);
+
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
+
+ /* clear holder data */
+ dax_dev->holder_ops = NULL;
+ dax_dev->holder_data = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -421,6 +473,19 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
+ * dax_holder() - obtain the holder of a dax device
+ * @dax_dev: a dax_device instance
+ *
+ * Return: the holder's data, which represents the holder if registered,
+ * otherwise NULL.
+ */
+void *dax_holder(struct dax_device *dax_dev)
+{
+ return dax_dev->holder_data;
+}
+EXPORT_SYMBOL_GPL(dax_holder);
+
+/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
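
The holder registration above is first-come-first-served: fs_dax_get_by_bdev() installs @holder via cmpxchg() and returns NULL if another holder already exists, and fs_put_dax() only clears the holder state when the caller still owns it. A minimal sketch of how a filesystem might use the new API, assuming only the signatures visible in these hunks; all example_* names are hypothetical:

/*
 * Illustrative only: register a superblock as the dax_device holder and
 * receive media failure notifications. example_handle_media_error() is a
 * hypothetical helper, not part of this patch.
 */
static int example_notify_failure(struct dax_device *dax_dev,
				  u64 offset, u64 len, int mf_flags)
{
	struct super_block *sb = dax_holder(dax_dev);

	return example_handle_media_error(sb, offset, len, mf_flags);
}

static const struct dax_holder_operations example_holder_ops = {
	.notify_failure = example_notify_failure,
};

/* At mount: returns NULL if another holder already claimed the device. */
dax_dev = fs_dax_get_by_bdev(bdev, &start_off, sb, &example_holder_ops);

/* At unmount: drops holder state only if @sb is the registered holder. */
fs_put_dax(dax_dev, sb);
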
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index f7dcc44f9414..027e8f336acc 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -33,7 +33,7 @@ struct exynos_bus {
unsigned long curr_freq;
- struct opp_table *opp_table;
+ int opp_token;
struct clk *clk;
unsigned int ratio;
};
@@ -161,8 +161,7 @@ static void exynos_bus_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -179,18 +178,16 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
- struct opp_table *opp_table;
- const char *vdd = "vdd";
+ const char *supplies[] = { "vdd", NULL };
int i, ret, count, size;
- opp_table = dev_pm_opp_set_regulators(dev, &vdd, 1);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
+ ret = dev_pm_opp_set_regulators(dev, supplies);
+ if (ret < 0) {
dev_err(dev, "failed to set regulators %d\n", ret);
return ret;
}
- bus->opp_table = opp_table;
+ bus->opp_token = ret;
/*
* Get the devfreq-event devices to get the current utilization of
@@ -236,8 +233,7 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
return 0;
err_regulator:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
@@ -459,8 +455,7 @@ err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 585a95fe2bd6..503376b894b6 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -821,6 +821,15 @@ static int devm_tegra_devfreq_init_hw(struct device *dev,
return err;
}
+static int tegra_devfreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
+{
+ /* We want to skip clk configuration via dev_pm_opp_set_opp() */
+ return 0;
+}
+
static int tegra_devfreq_probe(struct platform_device *pdev)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
@@ -830,6 +839,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
unsigned int i;
long rate;
int err;
+ const char *clk_names[] = { "actmon", NULL };
+ struct dev_pm_opp_config config = {
+ .supported_hw = &hw_version,
+ .supported_hw_count = 1,
+ .clk_names = clk_names,
+ .config_clks = tegra_devfreq_config_clks_nop,
+ };
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
@@ -874,13 +890,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = devm_pm_opp_set_supported_hw(&pdev->dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(&pdev->dev, &config);
if (err) {
- dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
+ dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
return err;
}
- err = devm_pm_opp_of_add_table_noclk(&pdev->dev, 0);
+ err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
return err;
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 205acb2c744d..e3885c90a3ac 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
enum dma_resv_usage old_usage;
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
- if ((old->context == fence->context && old_usage >= usage) ||
+ if ((old->context == fence->context && old_usage >= usage &&
+ dma_fence_is_later(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
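
The added dma_fence_is_later() test means a fence already stored for the same context is only replaced by one that actually supersedes it, rather than by any fence of sufficient usage. Condensed, the replacement condition now reads (a restatement of the hunk above, not new code):

	/* Replace the stored fence only if the new one supersedes it. */
	bool can_replace = (old->context == fence->context &&
			    old_usage >= usage &&
			    dma_fence_is_later(fence, old)) ||
			   dma_fence_is_signaled(old);
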
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index d4f1e4e9603a..85e00701473c 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
pdev = of_find_device_by_node(udma_node);
+ if (np != udma_node)
+ of_node_put(udma_node);
+
if (!pdev) {
pr_debug("UDMA device not found\n");
return ERR_PTR(-EPROBE_DEFER);
}
- if (np != udma_node)
- of_node_put(udma_node);
-
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 6276934d4d2b..8cd4e69dc7b4 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Request and map I/O memory */
xdev->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
- goto disable_clks;
+ goto error;
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index dc299ab36818..3f4ee3954384 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -849,7 +849,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
zynqmp_dma_desc_config_eod(chan, desc);
async_tx_ack(&first->async_tx);
- first->async_tx.flags = flags;
+ first->async_tx.flags = (enum dma_ctrl_flags)flags;
return &first->async_tx;
}
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 5bf92298554d..e50d7928bf8f 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -24,6 +24,8 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "edac_module.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 6793f6d799e7..0bc670778c99 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/types.h>
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 3ed7ae0d6781..96060bf90a24 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
+ struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
- struct scmi_clock_info *clk = ci->clk + clk_id;
+ if (clk_id >= ci->num_clocks)
+ return NULL;
+
+ clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 8abace56b958..f42dad997ac9 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
* @channel_id: OP-TEE channel ID used for this transport
* @tee_session: TEE session identifier
* @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 673f3eb498f4..e9afa8cab730 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = ph->get_priv(ph);
- struct reset_dom_info *rdom = pi->dom_info + domain;
+ struct reset_dom_info *rdom;
- if (rdom->async_reset)
+ if (domain >= pi->num_domains)
+ return -EINVAL;
+
+ rdom = pi->dom_info + domain;
+ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
flags |= ASYNCHRONOUS_RESET;
ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
- if (rdom->async_reset)
+ if (flags & ASYNCHRONOUS_RESET)
ret = ph->xops->do_xfer_with_response(ph, t);
else
ret = ph->xops->do_xfer(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 581d34c95769..4e27c3d66a83 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -138,9 +138,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ dev_set_drvdata(dev, scmi_pd_data);
+
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER, "genpd" },
{ },
@@ -150,6 +169,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 7288c6117838..0b5853fa9d87 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
{
int ret;
struct scmi_xfer *t;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
sizeof(__le32), sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
+ s = si->sensors + sensor_id;
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
struct sensors_info *si = ph->get_priv(ph);
+ if (sensor_id >= si->num_sensors)
+ return NULL;
+
return si->sensors + sensor_id;
}
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 7dad6f57d970..81cc3d0f6eec 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -2725,6 +2725,9 @@ void cs_dsp_stop(struct cs_dsp *dsp)
mutex_lock(&dsp->pwr_lock);
+ if (dsp->client_ops->pre_stop)
+ dsp->client_ops->pre_stop(dsp);
+
dsp->running = false;
if (dsp->ops->stop_core)
@@ -3177,6 +3180,110 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = {
.stop_core = cs_dsp_halo_stop_core,
};
+/**
+ * cs_dsp_chunk_write() - Format data to a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to write
+ * @val: Value to write
+ *
+ * This function sequentially writes values into the format required for DSP
+ * memory, it handles both inserting of the padding bytes and converting to
+ * big endian. Note that data is only committed to the chunk when a whole DSP
+ * words worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+ int nwrite, i;
+
+ nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+ ch->cache <<= nwrite;
+ ch->cache |= val >> (nbits - nwrite);
+ ch->cachebits += nwrite;
+ nbits -= nwrite;
+
+ if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache &= 0xFFFFFF;
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ *ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+ ch->bytes += sizeof(ch->cache);
+ ch->cachebits = 0;
+ }
+
+ if (nbits)
+ return cs_dsp_chunk_write(ch, nbits, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_write);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write() only writes data when a whole DSP word is ready
+ * to be written out, it is possible that some data will remain in the
+ * cache; this function pads that data with zeros up to a whole DSP word
+ * and writes it out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+ if (!ch->cachebits)
+ return 0;
+
+ return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_flush);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted
+ * buffer; it handles both removal of the padding bytes and conversion from
+ * big endian.
+ *
+ * Return: A negative number on error, otherwise the read value.
+ */
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
+{
+ int nread, i;
+ u32 result;
+
+ if (!ch->cachebits) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache = 0;
+ ch->cachebits = CS_DSP_DATA_WORD_BITS;
+
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ ch->cache |= *ch->data++;
+
+ ch->bytes += sizeof(ch->cache);
+ }
+
+ nread = min(ch->cachebits, nbits);
+ nbits -= nread;
+
+ result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
+ ch->cache <<= nread;
+ ch->cachebits -= nread;
+
+ if (nbits)
+ result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_read);
+
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
MODULE_LICENSE("GPL v2");
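
The three chunk helpers above form a small bit-packing API around 24-bit DSP words, each stored as 4 bytes, big endian, with a leading pad byte. A hedged round-trip sketch, assuming the cs_dsp_chunk() initialiser from cs_dsp.h; the buffer and field values are made up:

	/*
	 * Illustrative only: pack two 12-bit fields into one 24-bit DSP
	 * word and read them back.
	 */
	u8 buf[8] = { 0 };
	struct cs_dsp_chunk wr = cs_dsp_chunk(buf, sizeof(buf));
	struct cs_dsp_chunk rd = cs_dsp_chunk(buf, sizeof(buf));

	cs_dsp_chunk_write(&wr, 12, 0xABC);	/* cached, nothing committed yet */
	cs_dsp_chunk_write(&wr, 12, 0x123);	/* completes and commits the word */
	cs_dsp_chunk_flush(&wr);		/* no-op here: cache is empty */

	/* buf now holds 00 AB C1 23 */
	cs_dsp_chunk_read(&rd, 12);		/* returns 0xABC */
	cs_dsp_chunk_read(&rd, 12);		/* returns 0x123 */
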
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f191a1f901ac..0eb6b617f709 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
- dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+ dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
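
The two expressions decode the same field: the 24-bit SMBIOS version occupies offsets 7-9 of the _SM3_ entry point, so a be32 load at offset 6 masked to 24 bits equals a be24 load at offset 7. A self-contained userspace check of that equivalence (byte values invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Open-coded big-endian loads matching the kernel helpers' semantics. */
static uint32_t be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static uint32_t be24(const uint8_t *p)
{
	return (uint32_t)p[0] << 16 | p[1] << 8 | p[2];
}

int main(void)
{
	/* Fake _SM3_ entry point: version 3.4.0 at offsets 7..9. */
	uint8_t buf[10] = { '_', 'S', 'M', '3', '_', 0, 24, 3, 4, 0 };

	/* Old expression vs. new helper: both print 030400. */
	printf("%06x %06x\n", be32(buf + 6) & 0xFFFFFF, be24(buf + 7));
	return 0;
}
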
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 4dde8edd53b6..3e8d4b51a814 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -243,29 +243,6 @@ failed:
}
/**
- * efi_capsule_flush - called by file close or file flush
- * @file: file pointer
- * @id: not used
- *
- * If a capsule is being partially uploaded then calling this function
- * will be treated as upload termination and will free those completed
- * buffer pages and -ECANCELED will be returned.
- **/
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
-{
- int ret = 0;
- struct capsule_info *cap_info = file->private_data;
-
- if (cap_info->index > 0) {
- pr_err("capsule upload not complete\n");
- efi_free_all_buff_pages(cap_info);
- ret = -ECANCELED;
- }
-
- return ret;
-}
-
-/**
* efi_capsule_release - called by file close
* @inode: not used
* @file: file pointer
@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
{
struct capsule_info *cap_info = file->private_data;
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
kfree(cap_info->pages);
kfree(cap_info->phys);
kfree(file->private_data);
@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
.owner = THIS_MODULE,
.open = efi_capsule_open,
.write = efi_capsule_write,
- .flush = efi_capsule_flush,
.release = efi_capsule_release,
.llseek = no_llseek,
};
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8ced7af8e56d..4f9fb086eab7 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
return NOTIFY_DONE;
wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ if (!wdata)
+ return NOTIFY_DONE;
+
for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
wdata[l] = str[l];
wdata[l] = L'\0';
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d0537573501e..2c67f71f2375 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
# disable LTO
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 9e85e58d1f27..b450ebf95977 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -8,6 +8,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/unaligned.h>
#include "efistub.h"
@@ -29,7 +30,7 @@ static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
- const fdt32_t *prop;
+ const void *prop;
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
@@ -40,10 +41,16 @@ static int get_boot_hartid_from_fdt(void)
return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
- if (!prop || len != sizeof(u32))
+ if (!prop)
+ return -EINVAL;
+
+ if (len == sizeof(u32))
+ hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop);
+ else if (len == sizeof(u64))
+ hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
+ else
return -EINVAL;
- hartid = fdt32_to_cpu(*prop);
return 0;
}
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8a18930f3eb6..516f4f0069bd 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -14,7 +14,7 @@
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/*
* See if a user has put the shim into insecure mode. If so, and if the
- * variable doesn't have the runtime attribute set, we might as well
- * honor that.
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 05ae8bcc9d67..7a7abc8959d2 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -220,7 +220,6 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unsigned long end, next;
unsigned long rounded_start, rounded_end;
unsigned long unprotect_start, unprotect_size;
- int has_system_memory = 0;
if (efi_dxe_table == NULL)
return;
@@ -517,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index cb255a99170c..3c071f814455 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
+
/*
* mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
*
@@ -72,7 +74,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
struct mtk_adsp_ipc *adsp_ipc;
struct mtk_adsp_chan *adsp_chan;
struct mbox_client *cl;
- char *chan_name;
int ret;
int i, j;
@@ -83,12 +84,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
- chan_name = kasprintf(GFP_KERNEL, "mbox%d", i);
- if (!chan_name) {
- ret = -ENOMEM;
- goto out;
- }
-
adsp_chan = &adsp_ipc->chans[i];
cl = &adsp_chan->cl;
cl->dev = dev->parent;
@@ -99,17 +94,20 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->ipc = adsp_ipc;
adsp_chan->idx = i;
- adsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
ret = PTR_ERR(adsp_chan->ch);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %d ret %d\n",
- i, ret);
- goto out_free;
- }
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ adsp_mbox_ch_names[i], ret);
+
+ for (j = 0; j < i; j++) {
+ adsp_chan = &adsp_ipc->chans[j];
+ mbox_free_channel(adsp_chan->ch);
+ }
- dev_dbg(dev, "request mbox chan %s\n", chan_name);
- kfree(chan_name);
+ return ret;
+ }
}
adsp_ipc->dev = dev;
@@ -117,16 +115,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
dev_dbg(dev, "MTK ADSP IPC initialized\n");
return 0;
-
-out_free:
- kfree(chan_name);
-out:
- for (j = 0; j < i; j++) {
- adsp_chan = &adsp_ipc->chans[j];
- mbox_free_channel(adsp_chan->ch);
- }
-
- return ret;
}
static int mtk_adsp_ipc_remove(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index a41551870759..74cc71bb3984 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -164,6 +164,7 @@ static void dio48e_irq_mask(struct irq_data *data)
dio48egpio->irq_mask &= ~BIT(0);
else
dio48egpio->irq_mask &= ~BIT(1);
+ gpiochip_disable_irq(chip, offset);
if (!dio48egpio->irq_mask)
/* disable interrupts */
@@ -191,6 +192,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
+ gpiochip_enable_irq(chip, offset);
if (offset == 19)
dio48egpio->irq_mask |= BIT(0);
else
@@ -213,12 +215,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip dio48e_irqchip = {
+static const struct irq_chip dio48e_irqchip = {
.name = "104-dio-48e",
.irq_ack = dio48e_irq_ack,
.irq_mask = dio48e_irq_mask,
.irq_unmask = dio48e_irq_unmask,
- .irq_set_type = dio48e_irq_set_type
+ .irq_set_type = dio48e_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
@@ -322,7 +326,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
girq = &dio48egpio->chip.irq;
- girq->chip = &dio48e_irqchip;
+ gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 40be76efeed7..3286b914a2cf 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -113,6 +113,7 @@ static void idi_48_irq_mask(struct irq_data *data)
spin_lock_irqsave(&idi48gpio->lock, flags);
idi48gpio->irq_mask[boundary] &= ~mask;
+ gpiochip_disable_irq(chip, offset);
/* Exit early if there are still input lines with IRQ unmasked */
if (idi48gpio->irq_mask[boundary])
@@ -140,6 +141,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
prev_irq_mask = idi48gpio->irq_mask[boundary];
+ gpiochip_enable_irq(chip, offset);
idi48gpio->irq_mask[boundary] |= mask;
/* Exit early if IRQ was already unmasked for this boundary */
@@ -164,12 +166,14 @@ static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idi_48_irqchip = {
+static const struct irq_chip idi_48_irqchip = {
.name = "104-idi-48",
.irq_ack = idi_48_irq_ack,
.irq_mask = idi_48_irq_mask,
.irq_unmask = idi_48_irq_unmask,
- .irq_set_type = idi_48_irq_set_type
+ .irq_set_type = idi_48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
@@ -267,7 +271,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
girq = &idi48gpio->chip.irq;
- girq->chip = &idi_48_irqchip;
+ gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 65a5f581d981..4756e583f223 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -174,10 +174,11 @@ static void idio_16_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
unsigned long flags;
- idio16gpio->irq_mask &= ~mask;
+ idio16gpio->irq_mask &= ~BIT(offset);
+ gpiochip_disable_irq(chip, offset);
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -192,11 +193,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
const unsigned long prev_irq_mask = idio16gpio->irq_mask;
unsigned long flags;
- idio16gpio->irq_mask |= mask;
+ gpiochip_enable_irq(chip, offset);
+ idio16gpio->irq_mask |= BIT(offset);
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -217,12 +219,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idio_16_irqchip = {
+static const struct irq_chip idio_16_irqchip = {
.name = "104-idio-16",
.irq_ack = idio_16_irq_ack,
.irq_mask = idio_16_irq_mask,
.irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type
+ .irq_set_type = idio_16_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
@@ -299,7 +303,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
idio16gpio->out_state = 0xFFFF;
girq = &idio16gpio->chip.irq;
- girq->chip = &idio_16_irqchip;
+ gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index f422c3e129a0..f77a965f5780 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -41,14 +41,12 @@
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
* @gc: gpiochip for this instance
- * @irq: irqchip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
struct gpio_chip gc;
- struct irq_chip irq;
void __iomem *base;
struct clk *clk;
};
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
val = readl(g->base + GPIO_INT_EN);
val &= ~BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
struct ftgpio_gpio *g = gpiochip_get_data(gc);
u32 val;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
val = readl(g->base + GPIO_INT_EN);
val |= BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return 0;
}
+static const struct irq_chip ftgpio_irq_chip = {
+ .name = "FTGPIO010",
+ .irq_ack = ftgpio_gpio_ack_irq,
+ .irq_mask = ftgpio_gpio_mask_irq,
+ .irq_unmask = ftgpio_gpio_unmask_irq,
+ .irq_set_type = ftgpio_gpio_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (!IS_ERR(g->clk))
g->gc.set_config = ftgpio_gpio_set_config;
- g->irq.name = "FTGPIO010";
- g->irq.irq_ack = ftgpio_gpio_ack_irq;
- g->irq.irq_mask = ftgpio_gpio_mask_irq;
- g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
- g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
girq = &g->gc.irq;
- girq->chip = &g->irq;
+ gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 312309be0287..56656fb519f8 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
__raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
}
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
if (!(g->irq_edge & BIT(d->hwirq)))
ixp4xx_gpio_irq_ack(d);
+ gpiochip_enable_irq(gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
.name = "IXP4GPIO",
.irq_ack = ixp4xx_gpio_irq_ack,
- .irq_mask = irq_chip_mask_parent,
+ .irq_mask = ixp4xx_gpio_mask_irq,
.irq_unmask = ixp4xx_gpio_irq_unmask,
.irq_set_type = ixp4xx_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
g->gc.owner = THIS_MODULE;
girq = &g->gc.irq;
- girq->chip = &ixp4xx_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = g->fwnode;
girq->parent_domain = parent;
girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8943cea92764..523dfd17dd92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
}
}
+static void gpio_mockup_debugfs_cleanup(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+
+ debugfs_remove_recursive(chip->dbg_dir);
+}
+
static void gpio_mockup_dispose_mappings(void *data)
{
struct gpio_mockup_chip *chip = data;
@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gpio_mockup_debugfs_setup(dev, chip);
- return 0;
+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
}
static const struct of_device_id gpio_mockup_of_match[] = {
@@ -526,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
}
fwnode = fwnode_create_software_node(properties, NULL);
- if (IS_ERR(fwnode))
+ if (IS_ERR(fwnode)) {
+ kfree_strarray(line_names, ngpio);
return PTR_ERR(fwnode);
+ }
pdevinfo.name = "gpio-mockup";
pdevinfo.id = idx;
@@ -590,9 +599,9 @@ static int __init gpio_mockup_init(void)
static void __exit gpio_mockup_exit(void)
{
+ gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- gpio_mockup_unregister_pdevs();
}
module_init(gpio_mockup_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 15049822937a..3eb08cd1fdc0 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d8a26e503ca5..f163f5ca857b 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
unsigned long flags;
u32 rise, fall, high, low;
+ gpiochip_enable_irq(gc, d->hwirq);
+
spin_lock_irqsave(&rg->lock, flags);
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
spin_unlock_irqrestore(&rg->lock, flags);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
static int
@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
return gpio % MTK_BANK_WIDTH;
}
+static const struct irq_chip mt7621_irq_chip = {
+ .name = "mt7621-gpio",
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_set_type = mediatek_gpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return -ENOMEM;
rg->chip.offset = bank * MTK_BANK_WIDTH;
- rg->irq_chip.name = dev_name(dev);
- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
}
girq = &rg->chip.irq;
- girq->chip = &rg->irq_chip;
+ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index ecd7d169470b..2925f4d8cef3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1175,7 +1175,9 @@ static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@@ -1198,13 +1200,17 @@ static int pca953x_resume(struct device *dev)
}
}
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&chip->i2c_lock);
return ret;
+ }
ret = regcache_sync(chip->regmap);
+ mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index c7fbfa3ae43b..1198ab0305d0 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -661,24 +661,17 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
/* Initialize GPIO chips */
ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base);
- if (ret) {
- clk_put(clk);
+ if (ret)
return ret;
- }
/* clear all GPIO edge detects */
for_each_gpio_bank(gpio, c, pchip) {
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index 63dcf42f7c20..d6418f89d3f6 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -46,10 +46,20 @@
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
* @intr_type: Interrupt type selection
+ * @bank_read: Read a bank setting as a single 32-bit value
+ * @bank_write: Write a bank setting as a single 32-bit value
+ * @line_imr_pos: Bit shift of an IRQ line's IMR value.
+ *
+ * The DIR, DATA, and ISR registers each consist of four 8-bit port values,
+ * packed into a single 32-bit register. Use @bank_read and @bank_write to
+ * read and write these registers as single values. The IMR register consists
+ * of four 16-bit port values, packed into two 32-bit registers. Use
+ * @line_imr_pos to get the bit shift of the 2-bit field for a line's IMR
+ * settings. Shifts of 32 or more overflow into the second register.
*
* Because the interrupt mask register (IMR) combines the function of IRQ type
* selection and masking, two extra values are stored. @intr_mask is used to
- * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * mask/unmask the interrupts for a GPIO line, and @intr_type is used to store
* the selected interrupt types. The logical AND of these values is written to
* IMR on changes.
*/
@@ -59,10 +69,11 @@ struct realtek_gpio_ctrl {
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
raw_spinlock_t lock;
- u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
- u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
- unsigned int (*port_offset_u8)(unsigned int port);
- unsigned int (*port_offset_u16)(unsigned int port);
+ u8 intr_mask[REALTEK_GPIO_MAX];
+ u8 intr_type[REALTEK_GPIO_MAX];
+ u32 (*bank_read)(void __iomem *reg);
+ void (*bank_write)(void __iomem *reg, u32 value);
+ unsigned int (*line_imr_pos)(unsigned int line);
};
/* Expand with more flags as devices with other quirks are added */
@@ -101,14 +112,22 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
* port. The two interrupt mask registers store two bits per GPIO, so use u16
* values.
*/
-static unsigned int realtek_gpio_port_offset_u8(unsigned int port)
+static u32 realtek_gpio_bank_read_swapped(void __iomem *reg)
{
- return port;
+ return ioread32be(reg);
}
-static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
+static void realtek_gpio_bank_write_swapped(void __iomem *reg, u32 value)
{
- return 2 * port;
+ iowrite32be(value, reg);
+}
+
+static unsigned int realtek_gpio_line_imr_pos_swapped(unsigned int line)
+{
+ unsigned int port_pin = line % 8;
+ unsigned int port = line / 8;
+
+ return 2 * (8 * (port ^ 1) + port_pin);
}
/*
@@ -119,66 +138,67 @@ static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
* per GPIO, so use u16 values. The first register contains ports 1 and 0, the
* second ports 3 and 2.
*/
-static unsigned int realtek_gpio_port_offset_u8_rev(unsigned int port)
+static u32 realtek_gpio_bank_read(void __iomem *reg)
{
- return 3 - port;
+ return ioread32(reg);
}
-static unsigned int realtek_gpio_port_offset_u16_rev(unsigned int port)
+static void realtek_gpio_bank_write(void __iomem *reg, u32 value)
{
- return 2 * (port ^ 1);
+ iowrite32(value, reg);
}
-static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u16 irq_type, u16 irq_mask)
+static unsigned int realtek_gpio_line_imr_pos(unsigned int line)
{
- iowrite16(irq_type & irq_mask,
- ctrl->base + REALTEK_GPIO_REG_IMR + ctrl->port_offset_u16(port));
+ return 2 * line;
}
-static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u8 mask)
+static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, u32 mask)
{
- iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ ctrl->bank_write(ctrl->base + REALTEK_GPIO_REG_ISR, mask);
}
-static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
+static u32 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl)
{
- return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ return ctrl->bank_read(ctrl->base + REALTEK_GPIO_REG_ISR);
}
-/* Set the rising and falling edge mask bits for a GPIO port pin */
-static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
+/* Set the rising and falling edge mask bits for a GPIO pin */
+static void realtek_gpio_update_line_imr(struct realtek_gpio_ctrl *ctrl, unsigned int line)
{
- return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
+ void __iomem *reg = ctrl->base + REALTEK_GPIO_REG_IMR;
+ unsigned int line_shift = ctrl->line_imr_pos(line);
+ unsigned int shift = line_shift % 32;
+ u32 irq_type = ctrl->intr_type[line];
+ u32 irq_mask = ctrl->intr_mask[line];
+ u32 reg_val;
+
+ reg += 4 * (line_shift / 32);
+ reg_val = ioread32(reg);
+ reg_val &= ~(REALTEK_GPIO_IMR_LINE_MASK << shift);
+ reg_val |= (irq_type & irq_mask & REALTEK_GPIO_IMR_LINE_MASK) << shift;
+ iowrite32(reg_val, reg);
}
static void realtek_gpio_irq_ack(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
irq_hw_number_t line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
- realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
+ realtek_gpio_clear_isr(ctrl, BIT(line));
}
static void realtek_gpio_irq_unmask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
gpiochip_enable_irq(&ctrl->gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
@@ -186,16 +206,11 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = 0;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
gpiochip_disable_irq(&ctrl->gc, line);
@@ -205,10 +220,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 type, t;
+ u8 type;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -227,11 +240,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
irq_set_handler_locked(data, handle_edge_irq);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- t = ctrl->intr_type[port];
- t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- t |= realtek_gpio_imr_bits(port_pin, type);
- ctrl->intr_type[port] = t;
- realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
+ ctrl->intr_type[line] = type;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
@@ -242,28 +252,21 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned int lines_done;
- unsigned int port_pin_count;
unsigned long status;
int offset;
chained_irq_enter(irq_chip, desc);
- for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
- status = realtek_gpio_read_isr(ctrl, lines_done / 8);
- port_pin_count = min(gc->ngpio - lines_done, 8U);
- for_each_set_bit(offset, &status, port_pin_count)
- generic_handle_domain_irq(gc->irq.domain, offset + lines_done);
- }
+ status = realtek_gpio_read_isr(ctrl);
+ for_each_set_bit(offset, &status, gc->ngpio)
+ generic_handle_domain_irq(gc->irq.domain, offset);
chained_irq_exit(irq_chip, desc);
}
-static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, int cpu)
+static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
{
- return ctrl->cpumask_base + ctrl->port_offset_u8(port) +
- REALTEK_GPIO_PORTS_PER_BANK * cpu;
+ return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
}
static int realtek_gpio_irq_set_affinity(struct irq_data *data,
@@ -271,12 +274,10 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
void __iomem *irq_cpu_mask;
unsigned long flags;
int cpu;
- u8 v;
+ u32 v;
if (!ctrl->cpumask_base)
return -ENXIO;
@@ -284,15 +285,15 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
raw_spin_lock_irqsave(&ctrl->lock, flags);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
- irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, port, cpu);
- v = ioread8(irq_cpu_mask);
+ irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
+ v = ctrl->bank_read(irq_cpu_mask);
if (cpumask_test_cpu(cpu, dest))
- v |= BIT(port_pin);
+ v |= BIT(line);
else
- v &= ~BIT(port_pin);
+ v &= ~BIT(line);
- iowrite8(v, irq_cpu_mask);
+ ctrl->bank_write(irq_cpu_mask, v);
}
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -305,16 +306,17 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
static int realtek_gpio_irq_init(struct gpio_chip *gc)
{
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned int port;
+ u32 mask_all = GENMASK(gc->ngpio - 1, 0);
+ unsigned int line;
int cpu;
- for (port = 0; (port * 8) < gc->ngpio; port++) {
- realtek_gpio_write_imr(ctrl, port, 0, 0);
- realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
+ for (line = 0; line < gc->ngpio; line++)
+ realtek_gpio_update_line_imr(ctrl, line);
- for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
- iowrite8(GENMASK(7, 0), realtek_gpio_irq_cpu_mask(ctrl, port, cpu));
- }
+ realtek_gpio_clear_isr(ctrl, mask_all);
+
+ for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
+ ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
return 0;
}
@@ -387,12 +389,14 @@ static int realtek_gpio_probe(struct platform_device *pdev)
if (dev_flags & GPIO_PORTS_REVERSED) {
bgpio_flags = 0;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8_rev;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16_rev;
+ ctrl->bank_read = realtek_gpio_bank_read;
+ ctrl->bank_write = realtek_gpio_bank_write;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16;
+ ctrl->bank_read = realtek_gpio_bank_read_swapped;
+ ctrl->bank_write = realtek_gpio_bank_write_swapped;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
err = bgpio_init(&ctrl->gc, dev, 4,
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index f91e876fd969..bb50335239ac 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -419,11 +419,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
goto out;
} else {
bank->toggle_edge_mode |= mask;
- level |= mask;
+ level &= ~mask;
/*
* Determine gpio state. If 1 next interrupt should be
- * falling otherwise rising.
+		 * low, otherwise high.
*/
data = readl(bank->reg_base + bank->gpio_regs->ext_port);
if (data & mask)
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index fa4bc7481f9a..e739dcea61b2 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+ irq_domain_set_pm_device(girq->domain, dev);
}
ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
goto out_pm_dis;
}
- irq_domain_set_pm_device(girq->domain, dev);
-
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index b098f2dc196b..59fb10641598 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -265,6 +265,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ gpiochip_disable_irq(chip, offset);
port_state = ws16c48gpio->irq_mask >> (8 * port);
/* Select Register Page 2; Unlock all I/O ports */
@@ -295,6 +296,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ gpiochip_enable_irq(chip, offset);
ws16c48gpio->irq_mask |= mask;
port_state = ws16c48gpio->irq_mask >> (8 * port);
@@ -356,12 +358,14 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return 0;
}
-static struct irq_chip ws16c48_irqchip = {
+static const struct irq_chip ws16c48_irqchip = {
.name = "ws16c48",
.irq_ack = ws16c48_irq_ack,
.irq_mask = ws16c48_irq_mask,
.irq_unmask = ws16c48_irq_unmask,
- .irq_set_type = ws16c48_irq_set_type
+ .irq_set_type = ws16c48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
@@ -463,7 +467,7 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
girq = &ws16c48gpio->chip.irq;
- girq->chip = &ws16c48_irqchip;
+ gpio_irq_chip_set_chip(girq, &ws16c48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f8041d4898d1..92f185575e94 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1986,7 +1986,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
ret = -ENODEV;
goto out_free_le;
}
- le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +1999,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
init_waitqueue_head(&le->wait);
/* Request a thread to read the events */
- ret = request_threaded_irq(le->irq,
+ ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
@@ -2009,6 +2008,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ le->irq = irq;
+
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
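
The gpiolib-cdev change above fixes an ordering bug: le->irq was recorded before request_threaded_irq() had succeeded, so the shared out_free_le error path could attempt to free an IRQ that was never requested. The idiom, sketched with the surrounding code elided (field names as in the hunk):

	ret = request_threaded_irq(irq, lineevent_irq_handler,
				   lineevent_irq_thread, irqflags,
				   label, le);
	if (ret)
		goto out_free_le;	/* le->irq still 0: cleanup won't free_irq() */

	le->irq = irq;			/* publish only after acquisition succeeds */
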
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index c6cc493a5486..2b97b8a96fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r;
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
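
Both aldebaran mode2 helpers previously rebuilt the reset device list from the hive on every call; after this change they consume a list built once by the caller and handed down through the context (the amdgpu_device.c hunk further down stores it in reset_context->reset_device_list). A sketch of the resulting split, with the per-device work elided:

	/* caller (amdgpu_do_asic_reset): build the list once, hand it down */
	reset_context->reset_device_list = device_list_handle;
	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);

	/* callee (each mode2 helper): validate, then just walk */
	if (reset_context->reset_device_list == NULL)
		return -EINVAL;
	list_for_each_entry(tmp_adev, reset_context->reset_device_list,
			    reset_list) {
		/* per-device reset work */
	}
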
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e146810c700b..d597e2656c47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3c09dcc0986e..647220a8762d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a699134a1e8c..2170db83e41d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -40,10 +40,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
- vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
- if (adev)
+ if (adev) {
adev->kfd.vram_used += vram_needed;
+ adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
- if (adev)
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ if (adev) {
+ adev->kfd.vram_used -= size;
+ adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
-
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- - adev->kfd.vram_used
+ - adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
- return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+ return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
@@ -1725,7 +1728,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+ pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
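
The accounting change above splits one counter into two: vram_used keeps the exact byte count for reporting, while vram_used_aligned charges each allocation at the 2 MiB granularity and feeds the availability estimate, which is itself rounded down. A standalone, userspace-compilable demo of that arithmetic (all sizes and the 8 GiB VRAM figure are invented):

#include <stdint.h>
#include <stdio.h>

#define VRAM_ALIGN	 (1ULL << 21)			/* 2 MiB granularity */
#define ALIGN_UP(x, a)	 (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	uint64_t used = 0, used_aligned = 0;
	uint64_t sizes[] = { 4096, 3 << 20, 12345 };	/* three allocations */

	for (int i = 0; i < 3; i++) {
		used += sizes[i];				/* exact usage */
		used_aligned += ALIGN_UP(sizes[i], VRAM_ALIGN);	/* charged usage */
	}
	printf("exact=%llu charged=%llu\n",
	       (unsigned long long)used, (unsigned long long)used_aligned);
	printf("available rounds down to %llu\n",
	       (unsigned long long)ALIGN_DOWN((8ULL << 30) - used_aligned,
					      VRAM_ALIGN));
	return 0;
}

Charging the aligned size on reserve and refunding it on unreserve keeps the two counters consistent, which is exactly what the paired hunks in reserve_mem_limit()/unreserve_mem_limit() do.
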
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fd8f3731758e..b81b77a9efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d8f1335bc68f..b7bae833c804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
}
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e2eec985adb3..cb00c7d6f50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
- uint32_t *new, *tmp = NULL;
+ uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
do {
@@ -1747,7 +1747,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;
error_free:
- kfree(tmp);
+ if (tmp != new)
+ kfree(tmp);
kfree(new);
return ret;
}
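
The debugfs fix above targets the krealloc() failure path: krealloc() returns NULL on failure but leaves the old buffer valid, and on success the old and new pointers alias the same buffer. Initializing new to NULL and freeing tmp only when the two differ covers both outcomes without a leak or a double free. A hedged sketch of the idiom (krealloc_array() used here for illustration):

	uint32_t *new = NULL, *tmp = NULL;

	new = krealloc_array(tmp, i + 1, sizeof(*tmp), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto error_free;	/* tmp still owns the old buffer */
	}
	tmp = new;			/* success: both names now alias */
	/* ... */
error_free:
	if (tmp != new)			/* they alias after a successful realloc */
		kfree(tmp);
	kfree(new);			/* kfree(NULL) is a no-op */
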
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a6fe3070b6..be7aff2d4a57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ /* need to do common hw init early so everything is set up for gmc */
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ if (r) {
+ DRM_ERROR("hw_init %d failed %d\n", i, r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* need to do gmc hw init early so we can allocate gpu mem */
/* Try to reserve bad pages early */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
@@ -2456,12 +2464,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!hive->reset_domain ||
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
goto init_failed;
}
/* Drop the early temporary reset domain we created for device */
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
}
}
@@ -3050,8 +3060,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
int i, r;
static enum amd_ip_block_type ip_order[] = {
- AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -4413,8 +4423,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
- amdgpu_amdkfd_pre_reset(adev);
-
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4742,6 +4750,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
+
+ reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
@@ -5522,7 +5532,8 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
+ bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance_many(adev->pdev,
&peer_adev->dev, 1, true) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c20922a5af9f..23998f727c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -498,6 +500,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = drm_atomic_helper_dirtyfb,
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1100,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ecada5eadfe3..e325150879df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
+ sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
- return true;
- else
+ return false;
+ else
+ return true;
+ } else {
return false;
+ }
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5071b96be982..c2fd6f3076a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -159,7 +159,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
- dma_fence_put(&job->hw_fence);
+ if (!job->hw_fence.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -272,10 +275,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
-
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
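
Two things happen in amdgpu_job.c above. The amdgpu_job_free() hunk encodes an ownership rule: the job is allocated together with its embedded hw_fence, so once the fence has been initialized (hw_fence.ops set), the fence's release path owns the job's memory; before that point the fence's kref/ops were never set up and plain kfree() is the only safe path. The second hunk drops a NULL check on &sched->sched_rq[i], which can never be NULL since it is the address of an array element. Sketch of the free-path rule:

	if (!job->hw_fence.ops) {
		/* dma_fence_init() never ran: kref/ops are uninitialized,
		 * so dma_fence_put() would be unsafe; free directly.
		 */
		kfree(job);
	} else {
		/* the fence release callback frees the embedding job */
		dma_fence_put(&job->hw_fence);
	}
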
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fe82b8b19a4e..0c546245793b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+ /* zero sdma_hqd_mask for non-existent engine */
+ else if (adev->sdma.num_instances == 1)
+ adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
else
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index b067ce45d226..c9dec2434f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -486,11 +486,14 @@ static int psp_sw_fini(void *handle)
release_firmware(psp->ta_fw);
psp->ta_fw = NULL;
}
- if (adev->psp.cap_fw) {
+ if (psp->cap_fw) {
release_firmware(psp->cap_fw);
psp->cap_fw = NULL;
}
-
+ if (psp->toc_fw) {
+ release_firmware(psp->toc_fw);
+ psp->toc_fw = NULL;
+ }
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
@@ -753,7 +756,7 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
@@ -2401,7 +2404,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
- if (!ucode->fw)
+ if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
@@ -2641,6 +2644,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c32b74bd970f..e593e8c2a54d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -36,6 +36,7 @@
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
enum psp_shared_mem_size {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ff5361f5c2d4..12c6f97945a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
amdgpu_ras_query_error_status(adev, &info);
if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
if (amdgpu_ras_reset_error_status(adev, info.head.block))
dev_warn(adev->dev, "Failed to reset error counter and error status");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 9e55a5d7a825..ffda1560c648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -37,6 +37,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
unsigned long flags;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b4c19412625..134575a3893c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
#endif
};
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -702,7 +704,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
@@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;
@@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;
@@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;
@@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
- gtt = (void *)bo->ttm;
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
@@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL || !gtt->userptr)
return false;
@@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
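
All the (void *)ttm casts replaced above silently assumed struct ttm_tt sits at offset zero inside struct amdgpu_ttm_tt; the new ttm_to_amdgpu_ttm_tt() macro states the relationship explicitly via container_of() and keeps working even if the member ever moves. A self-contained userspace illustration of the idiom (struct layout invented):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt { int num_pages; };

struct amdgpu_ttm_tt {
	struct ttm_tt ttm;	/* need not be the first member */
	unsigned long userptr;
};

#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)

int main(void)
{
	struct amdgpu_ttm_tt gtt = { .ttm = { .num_pages = 4 },
				     .userptr = 0x1000 };
	struct ttm_tt *ttm = &gtt.ttm;

	/* recover the wrapper from the embedded member */
	printf("userptr=%#lx\n", ttm_to_amdgpu_ttm_tt(ttm)->userptr);
	return 0;
}
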
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index ebed3f5226db..96b6cf4c4d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -390,6 +390,7 @@ union amdgpu_firmware_header {
struct rlc_firmware_header_v2_1 rlc_v2_1;
struct rlc_firmware_header_v2_2 rlc_v2_2;
struct rlc_firmware_header_v2_3 rlc_v2_3;
+ struct rlc_firmware_header_v2_4 rlc_v2_4;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct sdma_firmware_header_v2_0 sdma_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 108e8e8a1a36..576849e95296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 59cac347baa3..690fd4f639f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2484,8 +2484,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
- flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
- AMDGPU_PTE_TF;
+ flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1b108d03e785..f2aebbf3fbe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index 33a8a7365aef..f0e235f98afb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
- data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
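
ATHUB 3.0.1 exposes the same ATHUB_MISC_CNTL bit layout at a different register offset, so the patch funnels every read and write through get/set accessors that switch on adev->ip_versions[ATHUB_HWIP][0]; the gating logic itself is untouched. Note also the def/data idiom the callers keep: the register is written back only when a bit actually changed, avoiding redundant MMIO writes. Reduced to its core (illustrative only):

	def = data = athub_v3_0_get_cg_cntl(adev);	/* offset picked per IP */

	if (enable)
		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

	if (def != data)				/* skip no-op writes */
		athub_v3_0_set_cg_cntl(adev, data);
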
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9c964cd3b5d4..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e0ad9f27dc3f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 77f5e998a120..b1c44fab074f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 802e5c753271..a22b45c92792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fafbad3cf08d..a3cd5c1e8529 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
@@ -4846,7 +4856,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
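
Each tap-delay ucode section in gfx_v10_0 is now registered only when the parsed RLC header reported a non-zero size, so zero-length sections no longer inflate fw_size or create placeholder ucode entries. The driver spells the five guards out explicitly; a condensed table-driven form of the same logic might look like this (illustrative sketch, not the patch's code):

	struct tap_delay_section {
		enum AMDGPU_UCODE_ID id;
		uint32_t size;
	} sections[] = {
		{ AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS,
		  adev->gfx.rlc.global_tap_delays_ucode_size_bytes },
		{ AMDGPU_UCODE_ID_SE0_TAP_DELAYS,
		  adev->gfx.rlc.se0_tap_delays_ucode_size_bytes },
		/* ...SE1 through SE3 in the same way... */
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sections); i++) {
		if (!sections[i].size)
			continue;	/* firmware doesn't carry this section */
		info = &adev->firmware.ucode[sections[i].id];
		info->ucode_id = sections[i].id;
		info->fw = adev->gfx.rlc_fw;
		adev->firmware.fw_size += ALIGN(sections[i].size, PAGE_SIZE);
	}
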
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 6fd71cb10e54..f6b1bb40e503 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388
#define regCGTT_WD_CLK_CTRL 0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
@@ -130,6 +131,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+ bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -1138,6 +1141,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+ .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5181,9 +5185,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs have only one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5212,9 +5219,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs have only one SDMA instance, so there is no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
}
}
@@ -5279,6 +5289,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+ /* Program RLC_PG_DELAY3 for CGPG hysteresis */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 1):
+ WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v11_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static int gfx_v11_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -5293,6 +5335,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 2):
amdgpu_gfx_off_ctrl(adev, enable);
break;
+ case IP_VERSION(11, 0, 1):
+ gfx_v11_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
default:
break;
}
@@ -5310,6 +5356,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c6e0f9313a7f..fc9c1043244c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9ae8cdaa033e..f513e2c2e964 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 22761a3bb818..67ca16a8027c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_domain->sem);
@@ -1102,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
*flags |= AMDGPU_PDE_BFS(0x9);
} else if (level == AMDGPU_VM_PDB0) {
- if (*flags & AMDGPU_PDE_PTE)
+ if (*flags & AMDGPU_PDE_PTE) {
*flags &= ~AMDGPU_PDE_PTE;
- else
+ if (!(*flags & AMDGPU_PTE_VALID))
+ *addr |= 1 << PAGE_SHIFT;
+ } else {
*flags |= AMDGPU_PTE_TF;
+ }
}
}
@@ -1624,12 +1628,15 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 39a696cd45b5..29c3484ae1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
0);
}
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Before doing the clock/power mode switch, force the MEM clock on */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* disable clock and power gating before making any changes */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* disable the MEM clock override after the clock/power mode change */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_2_update_mem_power_gating(adev, enable);
+ hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
.flush_hdp = hdp_v5_2_flush_hdp,
+ .update_clock_gating = hdp_v5_2_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
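
The new hdp_v5_2 clock-gating code above is built almost entirely from one idiom: read a register once, edit named fields in the cached value with REG_SET_FIELD(), and write the result back once. A self-contained userspace demo of how those generated mask/shift macros compose (the register and field names below are invented stand-ins for the generated headers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK	0x00000002U
#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT	1

/* simplified version of the kernel's REG_SET_FIELD() */
#define REG_SET_FIELD(orig, reg, field, val)				\
	(((orig) & ~reg##__##field##_MASK) |				\
	 (((uint32_t)(val) << reg##__##field##__SHIFT) &		\
	  reg##__##field##_MASK))

int main(void)
{
	uint32_t reg = 0xffffffffU;	/* pretend MMIO readback */

	/* clear exactly one field, leaving every other bit intact */
	reg = REG_SET_FIELD(reg, HDP_CLK_CNTL, RC_MEM_CLK_SOFT_OVERRIDE, 0);
	printf("%#" PRIx32 "\n", reg);	/* prints 0xfffffffd */
	return 0;
}
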
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 92dc60a9d209..085e613f3646 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
.get_wptr = ih_v6_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_0_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 120ea294abef..cc3fdbbcd314 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -183,6 +183,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+ mes_add_queue_pkt.trap_en = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3f44a099c52a..3e51e773f92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index cac72ced94c8..e8058edc1d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- //TODO
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
}
const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 6e0145b2b408..445cb06b9d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4b5396d3e60f..eec13cb5bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring has no backing hardware registers;
+ * just update the wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b465baa26762..aa761ff3a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v2_3_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v2_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
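A hedged sketch of the guard pattern used in the three nbio variants above: LTR programming is compiled out without CONFIG_PCIEASPM and skipped at runtime unless the PCI core marked the whole upstream path LTR-capable. The struct layouts and the local CONFIG define below are stand-ins, not the real kernel definitions.

#include <stdio.h>

#define CONFIG_PCIEASPM 1  /* stand-in for the Kconfig symbol */

struct pci_dev { int ltr_path; };
struct my_dev { struct pci_dev *pdev; };  /* hypothetical */

#ifdef CONFIG_PCIEASPM
static void program_ltr(struct my_dev *d)
{
    (void)d;
    puts("LTR programmed");
}
#endif

static void program_aspm(struct my_dev *d)
{
#ifdef CONFIG_PCIEASPM
    if (d->pdev->ltr_path)  /* only if the whole upstream path does LTR */
        program_ltr(d);
#endif
}

int main(void)
{
    struct pci_dev p = { .ltr_path = 1 };
    struct my_dev d = { .pdev = &p };
    program_aspm(&d);
    return 0;
}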
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index f7f6ddebd3e4..37615a77287b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v6_1_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v6_1_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 11848d1e238b..19455a725939 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
};
+#ifdef CONFIG_PCIEASPM
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v7_4_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v7_4_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 01e8288d09a8..def89379b51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -28,6 +28,14 @@
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
@@ -68,12 +76,6 @@ static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instan
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
SIZE, doorbell_size);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- SIZE, doorbell_size);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_SDMA0_DOORBELL_RANGE,
@@ -247,6 +249,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
}
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (enable) {
+ data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ } else {
+ data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (enable)
+ data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+ else
+ data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+ if (enable) {
+ data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +339,10 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
+ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};
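For reference, a standalone model of the read-back pattern in nbio_v7_7_get_clockgating_state: each supported feature is reported by testing one representative enable bit. Bit positions and flag values below are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MGCG_ENABLE_BIT (1u << 0)   /* hypothetical bit positions */
#define LS_ENABLE_BIT   (1u << 3)

#define FLAG_BIF_MGCG   (1ull << 0) /* hypothetical flag values */
#define FLAG_BIF_LS     (1ull << 1)

static void get_cg_state(uint32_t cpm_control, uint32_t pcie_cntl2,
                         uint64_t *flags)
{
    if (cpm_control & MGCG_ENABLE_BIT)
        *flags |= FLAG_BIF_MGCG;
    if (pcie_cntl2 & LS_ENABLE_BIT)
        *flags |= FLAG_BIF_LS;
}

int main(void)
{
    uint64_t flags = 0;
    get_cg_state(MGCG_ENABLE_BIT, 0, &flags);
    printf("flags=0x%llx\n", (unsigned long long)flags);
    return 0;
}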
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index a2588200ea58..0b2ac418e4ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 726a5bba40b2..a75a286e1ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 65181efba50e..56424f75dd2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index fde6154f2009..183024d7c184 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_ring *ring;
-
- /* sdma/ih doorbell range are programed by hypervisor */
- if (!amdgpu_sriov_vf(adev)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
- }
-
- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
- adev->irq.ih.doorbell_index);
- }
-}
-
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
- /* HW doorbell routing policy: doorbell writing not
- * in SDMA/IH/MM/ACV range will be routed to CP. So
- * we need to init SDMA/IH/MM/ACV doorbell range prior
- * to CP ip block init and ring test.
- */
- soc15_doorbell_range_init(adev);
return 0;
}
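Conceptually, the doorbell-range init moves from the common soc15 hw_init into each consumer (SDMA start, IH irq init), so ordering no longer depends on the common block running first. A rough standalone sketch; the function names are placeholders:

#include <stdio.h>

static void sdma_doorbell_range(int inst) { printf("sdma%d range\n", inst); }
static void ih_doorbell_range(void)       { printf("ih range\n"); }

static void sdma_start(int num_inst, int sriov_vf)
{
    for (int i = 0; i < num_inst; i++)
        if (!sriov_vf)              /* the hypervisor programs it for VFs */
            sdma_doorbell_range(i); /* now done where SDMA is started */
}

static void ih_irq_init(int sriov_vf)
{
    if (!sriov_vf)
        ih_doorbell_range();        /* now done where the IH ring inits */
}

int main(void)
{
    sdma_start(2, 0);
    ih_irq_init(0);
    return 0;
}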
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 52816de5e17b..2e50db3b761e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
return false;
default:
@@ -494,6 +495,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+ bool enter)
+{
+ if (enter)
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ else
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (adev->gfx.funcs->update_perfmon_mgcg)
+ adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+ return 0;
+}
+
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +528,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
+ .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
static int soc21_common_early_init(void *handle)
@@ -546,8 +562,10 @@ static int soc21_common_early_init(void *handle)
case IP_VERSION(11, 0, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_FGCG |
@@ -575,7 +593,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_ATHUB_MGCG |
- AMD_CG_SUPPORT_ATHUB_LS;
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -586,9 +606,25 @@ static int soc21_common_early_init(void *handle)
break;
case IP_VERSION(11, 0, 1):
adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
@@ -683,6 +719,8 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ca14c3ef742e..fb2d74f30448 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
@@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
- return 0;
}
/**
@@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_stop_dpg_mode(adev, i);
+ vcn_v4_0_stop_dpg_mode(adev, i);
continue;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cdd599a08125..1e83db0c5438 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -334,9 +338,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring has no backing hardware registers;
+ * just update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -409,6 +415,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 3b4eb8285943..59dfca093155 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -385,9 +389,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * The ih_soft ring has no backing hardware registers;
+ * just update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -461,6 +467,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2b3d8bc8f0aa..dc774ddf3445 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
err = kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &args->wait_result);
+ &args->timeout, &args->wait_result);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f5853835f03a..22c0929d410b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
- case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
break;
+ case IP_VERSION(6, 0, 1):
+ /* Reserve 1 for paging and 1 for gfx */
+ kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+ /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+ kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ break;
default:
break;
}
@@ -377,12 +382,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
- gfx_target_version = 100306;
- if (!vf)
- f2g = &gfx_v10_3_kfd2kgd;
- break;
case IP_VERSION(10, 3, 7):
- gfx_target_version = 100307;
+ gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3942a56c28bb..83e3ce9f6049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
return msecs_to_jiffies(user_timeout_ms) + 1;
}
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+ bool undo_auto_reset)
{
uint32_t i;
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
+ if (undo_auto_reset && waiters[i].activated &&
+ waiters[i].event && waiters[i].event->auto_reset)
+ set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
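A small illustrative model of the undo step (not the kernel implementation): an auto-reset event that fired during the wait was consumed by this waiter, so if the wait is being restarted the event is re-signaled and no wakeup is lost.

#include <stdio.h>

struct event { int signaled; int auto_reset; };

static void undo_consumed(struct event *e, int activated, int restarting)
{
    if (restarting && activated && e->auto_reset)
        e->signaled = 1;   /* give the signal back for the retry */
}

int main(void)
{
    struct event e = { .signaled = 0, .auto_reset = 1 };
    undo_consumed(&e, 1, 1);
    printf("signaled=%d\n", e.signaled);
    return 0;
}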
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
- long timeout = user_timeout_to_jiffies(user_timeout_ms);
+ long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
}
if (signal_pending(current)) {
- /*
- * This is wrong when a nonzero, non-infinite timeout
- * is specified. We need to use
- * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
- * contains a union with data for each user and it's
- * in generic kernel code that I don't want to
- * touch yet.
- */
ret = -ERESTARTSYS;
+ if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+ *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+ *user_timeout_ms = jiffies_to_msecs(
+ max(0l, timeout-1));
break;
}
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
event_waiters, events);
out_unlock:
- free_waiters(num_events, event_waiters);
+ free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
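A user-space model of the restart semantics added above: if the wait is interrupted, the remaining time is written back through the argument so a restarted call continues the same deadline instead of waiting the full period again. The sentinel values below are assumptions, not the real KFD constants.

#include <stdint.h>
#include <stdio.h>

#define TIMEOUT_IMMEDIATE 0u
#define TIMEOUT_INFINITE  0xffffffffu   /* assumed sentinel values */

static int wait_events(uint32_t *timeout_ms, uint32_t elapsed_ms,
                       int interrupted)
{
    if (interrupted) {
        if (*timeout_ms != TIMEOUT_IMMEDIATE &&
            *timeout_ms != TIMEOUT_INFINITE)
            *timeout_ms = (*timeout_ms > elapsed_ms)
                          ? *timeout_ms - elapsed_ms : 0;
        return -1;  /* caller restarts with the updated timeout */
    }
    return 0;
}

int main(void)
{
    uint32_t t = 1000;
    while (wait_events(&t, 400, t > 400))  /* pretend two interruptions */
        printf("restarting with %u ms left\n", t);
    return 0;
}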
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 373e5bfd4e91..b059a77b6081 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -685,13 +685,15 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
- migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
+ if (adev->gmc.xgmi.connected_to_cpu)
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+ else
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
-
if (!buf)
goto out;
@@ -974,7 +976,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
{
struct kfd_dev *kfddev = adev->kfd.dev;
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res = NULL;
unsigned long size;
void *r;
@@ -989,28 +991,34 @@ int svm_migrate_init(struct amdgpu_device *adev)
* should remove reserved size
*/
size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
- res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
- if (IS_ERR(res))
- return -ENOMEM;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ pgmap->range.start = adev->gmc.aper_base;
+ pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+ pgmap->type = MEMORY_DEVICE_COHERENT;
+ } else {
+ res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+ if (IS_ERR(res))
+ return -ENOMEM;
+ pgmap->range.start = res->start;
+ pgmap->range.end = res->end;
+ pgmap->type = MEMORY_DEVICE_PRIVATE;
+ }
- pgmap->type = MEMORY_DEVICE_PRIVATE;
pgmap->nr_range = 1;
- pgmap->range.start = res->start;
- pgmap->range.end = res->end;
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
- pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
-
+ pgmap->flags = 0;
/* Device manager releases device-specific resources, memory region and
* pgmap when driver disconnects from device.
*/
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
-
+ /* Only DEVICE_PRIVATE claimed a region; check before clearing type */
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+ devm_release_mem_region(adev->dev, res->start,
+ res->end - res->start + 1);
/* Disable SVM support capability */
pgmap->type = 0;
- devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
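An illustrative standalone model of the pgmap setup split: on XGMI systems the VRAM aperture is CPU-coherent and registered as device-coherent at its real physical address; otherwise a free physical range is claimed and the memory is device-private. Types and field names below are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

enum pgmap_type { DEV_PRIVATE, DEV_COHERENT };

struct pgmap_cfg {
    enum pgmap_type type;
    uint64_t start, end;
};

static void setup_pgmap(struct pgmap_cfg *p, int connected_to_cpu,
                        uint64_t aper_base, uint64_t aper_size,
                        uint64_t free_start, uint64_t size)
{
    if (connected_to_cpu) {
        p->type  = DEV_COHERENT;
        p->start = aper_base;            /* real, CPU-visible address */
        p->end   = aper_base + aper_size - 1;
    } else {
        p->type  = DEV_PRIVATE;
        p->start = free_start;           /* fake range, CPU can't touch */
        p->end   = free_start + size - 1;
    }
}

int main(void)
{
    struct pgmap_cfg p;
    setup_pgmap(&p, 1, 0x100000000ull, 1ull << 30, 0, 0);
    printf("type=%d range=[%llx..%llx]\n", p.type,
           (unsigned long long)p.start, (unsigned long long)p.end);
    return 0;
}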
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d03a3b9c9c5d..bf610e3b683b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a67ba8879a56..11074cc8c333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
- svm_bo->svms = prange->svms;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
- struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
@@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
- /* svm_range_bo_release destroys this worker thread. So during
- * the lifetime of this thread, kfd_process and mm will be valid.
- */
- p = container_of(svm_bo->svms, struct kfd_process, svms);
- mm = p->mm;
- if (!mm)
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+ } else {
+ svm_range_bo_unref(svm_bo);
return;
+ }
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
@@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
- r = svm_migrate_vram_to_ram(prange,
- svm_bo->eviction_fence->mm,
+ r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION);
} while (!r && prange->actual_loc && --retries);
@@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
+ mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9156b041ef17..cfac13ad06ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -46,7 +46,6 @@ struct svm_range_bo {
spinlock_t list_lock;
struct amdgpu_amdkfd_fence *eviction_fence;
struct work_struct eviction_work;
- struct svm_range_list *svms;
uint32_t evicting;
struct work_struct release_work;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 25990bec600d..3f0a4a415907 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
+ struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
- struct kfd_iolink_properties *gpu_link, *cpu_link;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
continue;
/* find CPU <--> CPU links */
+ cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
- list_for_each_entry(cpu_link,
+ list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
- if (cpu_link->node_to == gpu_link->node_to)
+ if (tmp_link->node_to == gpu_link->node_to) {
+ cpu_link = tmp_link;
break;
+ }
}
}
- if (cpu_link->node_to != gpu_link->node_to)
+ if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8660d93cc405..1efe7fa5bc58 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
/*
* In this architecture, the association
@@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
+ amdgpu_set_panel_orientation(&aconnector->base);
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -4754,7 +4759,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
- plane_info->layer_index = 0;
+ plane_info->layer_index = plane_state->normalized_zpos;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
@@ -4822,7 +4827,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->layer_index = plane_info.layer_index;
dc_plane_state->flip_int_enabled = true;
/*
@@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
@@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
-
- amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -9478,6 +9485,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ drm_atomic_normalize_zpos(dev, state);
+
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
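A self-contained sketch of what zpos normalization provides (modeled after what drm_atomic_normalize_zpos guarantees, not its actual implementation): planes on a CRTC are ordered by requested zpos and given dense indices 0..n-1, which the code above forwards as DC's layer_index.

#include <stdio.h>
#include <stdlib.h>

struct plane { int zpos; int normalized_zpos; };

static int cmp_zpos(const void *a, const void *b)
{
    return ((const struct plane *)a)->zpos -
           ((const struct plane *)b)->zpos;
}

static void normalize_zpos(struct plane *p, int n)
{
    qsort(p, n, sizeof(*p), cmp_zpos);
    for (int i = 0; i < n; i++)
        p[i].normalized_zpos = i;   /* dense, gap-free ordering */
}

int main(void)
{
    struct plane planes[] = { { .zpos = 7 }, { .zpos = 0 }, { .zpos = 3 } };
    normalize_zpos(planes, 3);
    for (int i = 0; i < 3; i++)
        printf("zpos %d -> layer_index %d\n",
               planes[i].zpos, planes[i].normalized_zpos);
    return 0;
}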
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0e48824f55e3..ee242d9d8b06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3288,6 +3288,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
+ dput(dir);
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b841b8b0a9d8..987bde4dca3d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 95168c2cfa6f..286981a2dd40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
- struct drm_framebuffer *fb,
- int *min_downscale, int *max_upscale);
-
int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 6767fab55c26..352e9afb85c6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -100,3 +100,24 @@ void convert_float_matrix(
matrix[i] = (uint16_t)reg_value;
}
}
+
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t remainder = 0;
+ while (b != 0) {
+ remainder = a % b;
+ a = b;
+ b = remainder;
+ }
+ return a;
+}
+
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den)
+{
+ uint32_t gcd = 0;
+
+ gcd = find_gcd(num, den);
+ *out_num = num / gcd;
+ *out_den = den / gcd;
+}
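A hedged usage sketch for the helper added above; gcd_u32 below re-implements the same Euclid loop in user space (note the helper assumes num and den are not both zero, since gcd(0, 0) would divide by zero):

#include <stdint.h>
#include <stdio.h>

/* same algorithm as the find_gcd()/reduce_fraction() pair above */
static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
    while (b) { uint32_t r = a % b; a = b; b = r; }
    return a;
}

int main(void)
{
    uint32_t num = 148500, den = 1001, g = gcd_u32(num, den);
    printf("%u/%u -> %u/%u\n", num, den, num / g, den / g);
    return 0;
}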
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index ade785c4fdc7..81da4e6f7a1a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -38,6 +38,9 @@ void convert_float_matrix(
struct fixed31_32 *flt,
uint32_t buffer_size);
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c76091fd1f2..f276abb63bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- case AMDGPU_FAMILY_GC_11_0_2: {
+ case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn32_clk_mgr_destroy(clk_mgr);
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0202dc682682..ca6dfd2d7561 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -24,10 +24,9 @@
*/
#include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index 2e088c5171b2..f1319957e400 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -28,6 +28,7 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index c09be3f15fe6..23a299c929a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -99,7 +99,7 @@ static int dcn31_get_active_display_cnt_wa(
return display_count;
}
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -110,9 +110,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -211,11 +212,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn31_disable_otg_wa(clk_mgr_base, true);
+ dcn31_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn31_disable_otg_wa(clk_mgr_base, false);
+ dcn31_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index ee99974b3b62..8559dcd80af0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -119,7 +119,7 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -129,11 +129,11 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -233,11 +233,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -307,16 +307,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn314_smu_enable_pme_wa(clk_mgr);
}
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -425,7 +415,7 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
@@ -510,7 +500,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
- DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+ DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
@@ -527,6 +517,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+
+ default:
+ break;
+ }
+ return 1;
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -540,89 +550,129 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
- int max_voltage = 0;
- int clock = 0;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage) {
- return clocks[i];
- } else if (clock_table->SocVoltage[i] >= max_voltage &&
- clock_table->SocVoltage[i] < voltage) {
- max_voltage = clock_table->SocVoltage[i];
- clock = clocks[i];
- }
- }
-
- ASSERT(clock);
- return clock;
-}
-
static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+ const DpmClocks314_t *clock_table)
{
- int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ int i;
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ /* Find highest valid fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+ clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
+ /* We expect the table to contain at least one valid fclk entry. */
+ ASSERT(is_valid_clock_value(max_fclk));
- bw_params->clk_table.num_entries = j + 1;
-
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
- switch (clock_table->DfPstateTable[j].WckRatio) {
- case WCK_RATIO_1_2:
- bw_params->clk_table.entries[i].wck_ratio = 2;
- break;
- case WCK_RATIO_1_4:
- bw_params->clk_table.entries[i].wck_ratio = 4;
- break;
- default:
- bw_params->clk_table.entries[i].wck_ratio = 1;
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+ int j;
+
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+ clock_table->DfPstateTable[j].FClk < min_fclk &&
+ clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
}
- bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+ /* For clocks we don't read from PMFW, copy from the default entry with the closest lower-or-equal dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[min_pstate].WckRatio);
+ }
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[max_pstate].WckRatio);
+ i++;
}
+ bw_params->clk_table.num_entries = i--; /* i now indexes the last entry */
+ /* Make sure all highest clocks are included */
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+
+ /*
+ * Set any zero clocks to the default maximum. Not an issue for
+ * power, since we aren't doing any switching in that case anyway.
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
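An illustrative model of the closest lower-or-equal default lookup used in the rewritten populate_bw_params: for clocks the driver does not read from PMFW, it copies values from the default table entry whose dcfclk is the closest one at or below the PMFW-reported dcfclk. The table contents below are made up.

#include <stdio.h>

static int closest_le_idx(const unsigned def_dcfclk[], int n, unsigned dcfclk)
{
    int j;
    for (j = n - 1; j > 0; j--)
        if (def_dcfclk[j] <= dcfclk)
            break;
    return j;   /* falls back to entry 0 if none is <= dcfclk */
}

int main(void)
{
    const unsigned defaults[] = { 400, 600, 800, 1000 };
    printf("pmfw 700 MHz -> default entry %d\n",
           closest_le_idx(defaults, 4, 700));   /* -> 1 (600 MHz) */
    return 0;
}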
@@ -641,7 +691,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn314_init_clocks,
+ .init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +731,10 @@ void dcn314_clk_mgr_construct(
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
- smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+ smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- sizeof(DpmClocks_t),
+ sizeof(DpmClocks314_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
@@ -729,7 +779,7 @@ void dcn314_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index c695a4498c50..171f84340eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index a7958dc96581..047d19ea919c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -36,6 +36,37 @@ typedef enum {
WCK_RATIO_MAX
} WCK_RATIO_e;
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
struct dcn314_watermarks {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
};
struct dcn314_smu_dpm_clks {
- DpmClocks_t *dpm_clks;
+ DpmClocks314_t *dpm_clks;
union large_integer mc_address;
};
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index cc076621f5e6..98ad8e0fd2d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,6 +46,9 @@
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK 100000
+
static int dcn315_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
@@ -79,7 +82,7 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -91,9 +94,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -146,6 +150,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
+ /* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+ if (!new_clocks->p_state_change_support)
+ new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
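A minimal model of the p-state lock trick above: when a p-state change cannot be supported, the driver requests an impossibly high hard-min dcfclk, which keeps the SMU from switching memory p-states. The 10 GHz constant mirrors UNSUPPORTED_DCFCLK above; the rest is illustrative.

#include <stdio.h>

#define UNSUPPORTED_DCFCLK 10000000u   /* 10 GHz: never satisfiable */

static unsigned pick_dcfclk(unsigned requested_khz, int pstate_ok)
{
    if (!pstate_ok)
        return UNSUPPORTED_DCFCLK;  /* pins the highest p-state */
    return requested_khz;
}

int main(void)
{
    printf("%u\n", pick_dcfclk(600000, 0));  /* -> 10000000 */
    printf("%u\n", pick_dcfclk(600000, 1));  /* -> 600000 */
    return 0;
}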
@@ -159,10 +166,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100 MHz to avoid underflow when a lower-resolution eDP panel switches over to an additional 4K monitor.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
+ if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+ if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -175,12 +182,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, true);
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -275,7 +282,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -283,7 +290,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -291,7 +298,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -299,7 +306,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -556,8 +563,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
- if (!bw_params->num_channels)
- bw_params->num_channels = 2;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
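The UNSUPPORTED_DCFCLK trick above works by asking the SMU for a hard-min DCFCLK that no DPM level can satisfy, which pins the clock at its highest level and thereby blocks memory P-state switching. A condensed sketch of the request-planning logic; should_set_clock is paraphrased from its usual DC form, and the standalone function below is illustrative rather than the driver's actual code:

```c
#include <stdbool.h>

#define UNSUPPORTED_DCFCLK 10000000 /* kHz; above any real DPM level */
#define MIN_DPP_DISP_CLK     100000 /* kHz; 100 MHz workaround floor */

/* Raise a clock immediately; lower it only once lowering is safe. */
static bool should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
        return new_khz > cur_khz || (safe_to_lower && new_khz < cur_khz);
}

/* Returns true when a new hard-min DCFCLK request must go to the SMU. */
bool plan_clocks(bool p_state_change_support, bool safe_to_lower,
                 int *dcfclk_khz, int *dppclk_khz, int *dispclk_khz,
                 int cur_dcfclk_khz)
{
        /* Requesting an unreachable DCFCLK pins the SMU at its highest
         * level, which blocks memory P-state switching. */
        if (!p_state_change_support)
                *dcfclk_khz = UNSUPPORTED_DCFCLK;

        /* Clamp DPP/DISP clocks to the 100 MHz workaround floor. */
        if (*dppclk_khz < MIN_DPP_DISP_CLK)
                *dppclk_khz = MIN_DPP_DISP_CLK;
        if (*dispclk_khz < MIN_DPP_DISP_CLK)
                *dispclk_khz = MIN_DPP_DISP_CLK;

        return should_set_clock(safe_to_lower, *dcfclk_khz, cur_dcfclk_khz);
}
```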
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 0cd3d2eb7ac7..187f5b27fdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e42f44fc1c08..fb22c3d70528 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1074,8 +1074,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
- bool pipe_split_change =
- context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -1087,7 +1094,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->current_state->stream_count != context->stream_count)
should_disable = true;
- if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *old_pipe, *new_pipe;
old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -3229,7 +3237,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3255,6 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
}
- }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -3466,7 +3473,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3500,19 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
- }
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- /* Since phantom pipe programming is moved to post_unlock_program_front_end,
- * move the SubVP lock to after the phantom pipes have been setup
- */
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- }
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been set up
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4292,7 +4297,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
@@ -4340,6 +4345,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
struct dc_context *dc_ctx = dc->ctx;
dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
/**
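The disable_dangling_plane() change above replaces a raw top_pipe pointer comparison with a pipe_idx comparison when both states have a parent pipe: each dc_state owns its own pipe_ctx array, so pointers into two different states never compare equal, even for the same logical pipe. A condensed restatement of the fixed check:

```c
#include <stdbool.h>

struct pipe_ctx {
        struct pipe_ctx *top_pipe; /* points into the owning state's array */
        int pipe_idx;
};

/*
 * Two dc_state copies keep separate pipe_ctx arrays, so the same logical
 * split produces different top_pipe pointers. Compare indices when both
 * sides have a parent; fall back to the pointer test (NULL vs non-NULL)
 * otherwise.
 */
static bool pipe_split_changed(const struct pipe_ctx *new_pipe,
                               const struct pipe_ctx *old_pipe)
{
        if (new_pipe->top_pipe && old_pipe->top_pipe)
                return new_pipe->top_pipe->pipe_idx !=
                       old_pipe->top_pipe->pipe_idx;

        return new_pipe->top_pipe != old_pipe->top_pipe;
}
```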
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e51338441d0..66d2ae7aacf5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3372,7 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
switch(link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
if(!dc->debug.disable_z10)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 48dad093ae8b..780f7f4c28b6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2758,8 +2758,14 @@ bool perform_link_training_with_retries(
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ /* Update the verified link settings to the current ones,
+ * because DPIA link training might fall back to a lower link setting.
+ */
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ }
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
@@ -5121,6 +5127,14 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ /* If this chip cap is set, at least one retimer must exist in the chain
+ * Override the count to 1 if we receive a known bad count (0 or an invalid value). */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ }
+
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
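For context on the 0x80 fallback above: the DPCD PHY_REPEATER_CNT field is one-hot encoded, so 0x80 means one LTTPR, 0x40 two, down to 0x01 for eight; anything else converts to a count of 0, which is why a known-present fixed-VS retimer gets the "one repeater" encoding substituted. A sketch of the conversion — the function name below is illustrative, the driver's own helper being dp_convert_to_count:

```c
#include <stdint.h>

/* DPCD PHY_REPEATER_CNT is one-hot: 0x80 -> 1 LTTPR ... 0x01 -> 8. */
static uint8_t lttpr_count_from_dpcd(uint8_t phy_repeater_cnt)
{
        switch (phy_repeater_cnt) {
        case 0x80: return 1;
        case 0x40: return 2;
        case 0x20: return 3;
        case 0x10: return 4;
        case 0x08: return 5;
        case 0x04: return 6;
        case 0x02: return 7;
        case 0x01: return 8;
        default:   return 0; /* invalid encoding */
        }
}
```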
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ffc0f1c0ea93..ccf7bd3d90fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_21;
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dc_version = DCN_VERSION_3_14;
break;
default:
@@ -3584,6 +3584,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
}
}
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx_reset;
+
+ /* reset the otg sync context for the pipe and its slave pipes if any */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+ if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+ }
+}
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
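The reset_sync_context_for_pipe() helper added above relies on the pipe-syncd bookkeeping: each pipe_ctx records which master pipe it is synced to, plus a valid flag. A sketch under the assumption that the valid flag lives in the high bit of a single byte (the usual DC macro layout); all names below are illustrative:

```c
#include <stdbool.h>
#include <stdint.h>

#define PIPE_SYNCD_VALID 0x80 /* assumed valid flag in the high bit */

struct pipe_sync { uint8_t pipe_idx_syncd; };

static void set_syncd(struct pipe_sync *p, uint8_t master)
{
        p->pipe_idx_syncd = PIPE_SYNCD_VALID | master;
}

static bool syncd_valid(const struct pipe_sync *p)
{
        return p->pipe_idx_syncd & PIPE_SYNCD_VALID;
}

static uint8_t syncd_master(const struct pipe_sync *p)
{
        return p->pipe_idx_syncd & 0x7f;
}

/* Detach every slave of 'master' by re-pointing it at itself,
 * mirroring the reset loop in reset_sync_context_for_pipe(). */
static void reset_sync_for_pipe(struct pipe_sync *pipes, int count,
                                uint8_t master)
{
        for (int i = 0; i < count; i++)
                if ((syncd_valid(&pipes[i]) &&
                     syncd_master(&pipes[i]) == master) || i == master)
                        set_syncd(&pipes[i], (uint8_t)i);
}
```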
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f62d50901d92..0c85ab5933b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -329,7 +329,7 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- if (attributes->height * attributes->width * 4 > 16384)
+ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
if (stream->mall_stream_config.type == SUBVP_MAIN)
return false;
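The 16384-byte bound above is the size of a 64x64 ARGB8888 cursor (64 × 64 × 4 bytes), the largest the SubVP hardware path handles; with the new debug flag, the rejection only fires when the software-cursor fallback is actually allowed. The condition, restated:

```c
#include <stdbool.h>
#include <stdint.h>

#define CURSOR_BYTES_PER_PIXEL 4     /* ARGB8888 */
#define MAX_HW_CURSOR_BYTES    16384 /* 64 x 64 x 4 */

/* True when HW cursor attributes must be rejected on a SubVP main
 * stream so that a software cursor can take over instead. */
static bool reject_hw_cursor(bool allow_sw_cursor_fallback,
                             bool is_subvp_main,
                             uint32_t width, uint32_t height)
{
        uint32_t bytes = width * height * CURSOR_BYTES_PER_PIXEL;

        return allow_sw_cursor_fallback && is_subvp_main &&
               bytes > MAX_HW_CURSOR_BYTES;
}
```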
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8e1e40083ec8..dbf8158b832e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.198"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -213,6 +213,7 @@ struct dc_caps {
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
+ uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
@@ -352,6 +353,7 @@ struct dc_config {
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
+ bool use_default_clock_table;
};
enum visual_confirm {
@@ -609,6 +611,7 @@ struct dc_bounding_box_overrides {
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
+ int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -742,6 +745,7 @@ struct dc_debug_options {
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
+ bool allow_sw_cursor_fallback;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -751,6 +755,7 @@ struct dc_debug_options {
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
+ bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2d61c2a91cee..52a61b3e5a8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
+#include "../basics/conversion.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
union dmub_rb_cmd cmd = { 0 };
cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- // TODO: Uncomment once FW headers are promoted
- //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+ cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -417,44 +417,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
- int16_t drr_frame_us = 0;
- int16_t min_drr_supported_us = 0;
- int16_t max_drr_supported_us = 0;
- int16_t max_drr_vblank_us = 0;
- int16_t max_drr_mallregion_us = 0;
- int16_t mall_region_us = 0;
- int16_t prefetch_us = 0;
- int16_t subvp_active_us = 0;
- int16_t drr_active_us = 0;
- int16_t min_vtotal_supported = 0;
- int16_t max_vtotal_supported = 0;
+ uint16_t drr_frame_us = 0;
+ uint16_t min_drr_supported_us = 0;
+ uint16_t max_drr_supported_us = 0;
+ uint16_t max_drr_vblank_us = 0;
+ uint16_t max_drr_mallregion_us = 0;
+ uint16_t mall_region_us = 0;
+ uint16_t prefetch_us = 0;
+ uint16_t subvp_active_us = 0;
+ uint16_t drr_active_us = 0;
+ uint16_t min_vtotal_supported = 0;
+ uint16_t max_vtotal_supported = 0;
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
- drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+ drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
// P-State allow width and FW delays are already included in phantom_timing->v_addressable
- mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+ mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
- (div64_s64((int64_t)min_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
-
- prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
- (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
- drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
- max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+ min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
+
+ prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+ (((uint64_t)main_timing->pix_clk_100hz * 100)));
+ drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+ max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
- max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
+ max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
@@ -548,10 +546,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
- subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
- (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
- (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+ subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+ (uint64_t)phantom_timing0->h_total * 1000000),
+ (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+ (uint64_t)phantom_timing1->h_total * 1000000),
+ (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
// should increase its prefetch time to match the other
@@ -559,16 +559,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
- (int64_t)phantom_timing1->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing1->h_total * 1000000));
+
} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
- (int64_t)phantom_timing0->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing0->h_total * 1000000));
}
}
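The div64_s64 → div64_u64 rewrites in this file all follow one pattern: widen to u64, keep the × 1,000,000 factor in the numerator, and divide by the pixel clock in Hz (pix_clk_100hz × 100). The old int16_t intermediates overflowed, and the old expressions had the 1000000 factor on the wrong side of the division. A self-contained sketch of the corrected frame-time arithmetic, with div64_u64 stubbed for user space:

```c
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's div64_u64(). */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
        return dividend / divisor;
}

/*
 * Frame time in microseconds for a v_total x h_total timing at a pixel
 * clock given in 100 Hz units (as DC stores it). Widening to u64 before
 * the multiply avoids the overflow the old int16_t/div64_s64 math hit.
 */
static uint32_t frame_time_us(uint32_t v_total, uint32_t h_total,
                              uint32_t pix_clk_100hz)
{
        return (uint32_t)div64_u64((uint64_t)v_total * h_total * 1000000ULL,
                                   (uint64_t)pix_clk_100hz * 100);
}

int main(void)
{
        /* 4K@60-class timing: 2200 x 4400 total at 594 MHz. */
        printf("%u us\n", frame_time_us(2200, 4400, 5940000)); /* 16296 */
        return 0;
}
```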
@@ -601,6 +602,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ uint32_t out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +614,16 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+ /* Calculate the scaling factor from the src and dst height.
+ * e.g. if 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
+ * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+ */
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num, &out_den);
+ // TODO: Uncomment below lines once DMCUB include headers are promoted
+ //pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ //pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
@@ -619,13 +631,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
// Round up
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
pipe_data->pipe_config.subvp_data.processing_delay_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
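reduce_fraction(), pulled in via basics/conversion.h above, presumably reduces a numerator/denominator pair by their GCD so the firmware receives the smallest equivalent scale factor (2160/1080 → 2/1, per the comment in the hunk). A plausible sketch — the real helper's signature may differ:

```c
#include <stdint.h>

/* Euclid's algorithm. */
static uint32_t gcd32(uint32_t a, uint32_t b)
{
        while (b) {
                uint32_t t = a % b;

                a = b;
                b = t;
        }
        return a;
}

/* Reduce num/den to lowest terms, e.g. 2160/1080 -> 2/1. */
static void reduce_fraction(uint32_t num, uint32_t den,
                            uint32_t *out_num, uint32_t *out_den)
{
        uint32_t g = gcd32(num, den);

        *out_num = num / g;
        *out_den = den / g;
}
```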
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index a0af0f6afeef..9544abf75e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -344,6 +344,7 @@ enum dc_detect_reason {
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 213de8cabfad..165392380842 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
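The new `-= ... % 10` lines round the deep-color clock down to a multiple of 10 in 100 Hz units, i.e. to a whole kHz: the ×5/4 (10 bpc) and ×6/4 (12 bpc) adjustments can land on 25 Hz boundaries that kHz-granularity consumers would otherwise mis-round. A worked example of the 10 bpc case:

```c
#include <stdint.h>
#include <stdio.h>

/* 10 bpc TMDS clock = pixel clock * 5/4, then rounded down to a
 * whole kHz (a multiple of 10 in 100 Hz units). */
static uint32_t adjust_100hz_for_10bpc(uint32_t pix_clk_100hz)
{
        uint32_t adjusted = (pix_clk_100hz * 5) >> 2;

        return adjusted - adjusted % 10;
}

int main(void)
{
        /* 74.25 MHz -> 92.8125 MHz; keep 92.812 MHz (whole kHz). */
        printf("%u\n", adjust_100hz_for_10bpc(742500)); /* 928120 */
        return 0;
}
```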
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 38a67051d470..aea49334021c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2164,7 +2164,8 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
- if (pipe_ctx->stream_res.audio != NULL) {
+ if (pipe_ctx->stream_res.audio != NULL &&
+ pipe_ctx->stream_res.audio->enabled == false) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2204,7 +2205,8 @@ static void dce110_setup_audio_dto(
if (!dc_is_dp_signal(pipe_ctx->stream->signal))
continue;
- if (pipe_ctx->stream_res.audio != NULL) {
+ if (pipe_ctx->stream_res.audio != NULL &&
+ pipe_ctx->stream_res.audio->enabled == false) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d4a6504dfe00..db7ca4b0cdb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b54c12400323..564e061ccb58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bed783747f16..5b5d952b2b8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 769974375b4b..8e9384094f6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
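Note that the guard added above only catches a node whose mpcc_bot points at itself, which is the failure mode the ASSERT targets; a longer cycle would still loop (a bounded walk or Floyd's tortoise-and-hare would be needed for that). A standalone version of the guarded traversal:

```c
#include <assert.h>
#include <stddef.h>

struct mpcc {
        int dpp_id;
        struct mpcc *mpcc_bot; /* next blending layer down the tree */
};

/* Walk the blending chain looking for dpp_id, bailing out on a
 * self-referencing mpcc_bot instead of spinning forever. */
static struct mpcc *find_mpcc_for_dpp(struct mpcc *head, int dpp_id)
{
        struct mpcc *cur = head;

        while (cur) {
                if (cur->dpp_id == dpp_id)
                        return cur;

                assert(cur != cur->mpcc_bot); /* avoid circular list */
                if (cur == cur->mpcc_bot)
                        break;

                cur = cur->mpcc_bot;
        }
        return NULL;
}
```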
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e1a9a45b03b6..3fc300cd1ce9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -465,6 +465,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+ //last chance to clear underflow; otherwise it will always be there because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index ea1f14af0db7..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index cd2671161ef1..7ce64a3c1b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -445,226 +445,6 @@
type DSCRM_DSC_FORWARD_EN; \
type DSCRM_DSC_OPP_PIPE_SOURCE
-#define DSC_REG_LIST_DCN314(id) \
- SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
- SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
- SRI(DSCC_CONFIG0, DSCC, id),\
- SRI(DSCC_CONFIG1, DSCC, id),\
- SRI(DSCC_STATUS, DSCC, id),\
- SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
- SRI(DSCC_PPS_CONFIG0, DSCC, id),\
- SRI(DSCC_PPS_CONFIG1, DSCC, id),\
- SRI(DSCC_PPS_CONFIG2, DSCC, id),\
- SRI(DSCC_PPS_CONFIG3, DSCC, id),\
- SRI(DSCC_PPS_CONFIG4, DSCC, id),\
- SRI(DSCC_PPS_CONFIG5, DSCC, id),\
- SRI(DSCC_PPS_CONFIG6, DSCC, id),\
- SRI(DSCC_PPS_CONFIG7, DSCC, id),\
- SRI(DSCC_PPS_CONFIG8, DSCC, id),\
- SRI(DSCC_PPS_CONFIG9, DSCC, id),\
- SRI(DSCC_PPS_CONFIG10, DSCC, id),\
- SRI(DSCC_PPS_CONFIG11, DSCC, id),\
- SRI(DSCC_PPS_CONFIG12, DSCC, id),\
- SRI(DSCC_PPS_CONFIG13, DSCC, id),\
- SRI(DSCC_PPS_CONFIG14, DSCC, id),\
- SRI(DSCC_PPS_CONFIG15, DSCC, id),\
- SRI(DSCC_PPS_CONFIG16, DSCC, id),\
- SRI(DSCC_PPS_CONFIG17, DSCC, id),\
- SRI(DSCC_PPS_CONFIG18, DSCC, id),\
- SRI(DSCC_PPS_CONFIG19, DSCC, id),\
- SRI(DSCC_PPS_CONFIG20, DSCC, id),\
- SRI(DSCC_PPS_CONFIG21, DSCC, id),\
- SRI(DSCC_PPS_CONFIG22, DSCC, id),\
- SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
- SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCCIF_CONFIG0, DSCCIF, id),\
- SRI(DSCCIF_CONFIG1, DSCCIF, id),\
- SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
-
-#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
- DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
- DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
- DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
-
-
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
uint32_t DSC_DEBUG_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 936af65381ef..9570c2118ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 884fa060f375..598ce872a8d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1565,6 +1565,7 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in the dc interface; we just need
* to apply the existing programming for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 3d307dd58e9a..116f67a0b989 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..5752271f22df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active;
+ uint32_t riommu_active, prefetch_done;
int i;
+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+ if (prefetch_done) {
+ hubbub->riommu_active = true;
+ return;
+ }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
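The early return added above makes dcn21_dchvm_init() idempotent: when HOSTVM_PREFETCH_DONE is already set (e.g. VBIOS or a previous driver instance completed the sequence), the function just records riommu_active and skips re-initialization. A sketch, with the bit position assumed purely for illustration:

```c
#include <stdbool.h>
#include <stdint.h>

#define HOSTVM_PREFETCH_DONE 0x1u /* assumed bit in RIOMMU_STAT0 */

struct hubbub_state {
        bool riommu_active;
        uint32_t riommu_stat0; /* stand-in for the DCHVM_RIOMMU_STAT0 read */
};

/* Returns true when DCHVM init was already completed by a previous
 * owner and the full init sequence can be skipped. */
static bool dchvm_already_initialized(struct hubbub_state *h)
{
        if (h->riommu_stat0 & HOSTVM_PREFETCH_DONE) {
                h->riommu_active = true;
                return true;
        }
        return false; /* fall through to the full init sequence */
}
```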
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 77b00f86c216..4a668d6563df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 6a4dcafb9bba..dc3e8df706b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 0a67f8a5656d..d97076648acb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -372,7 +372,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGE) {
+ if (eng_id <= ENGINE_ID_DIGB) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index a788d160953b..ab70ebd8f223 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+ return true;
+
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
index 7c77c71591a0..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -162,7 +162,8 @@
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
- SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 468a893ff785..aedff18aff56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2153,7 +2153,7 @@ static bool dcn31_resource_construct(
pool->base.usb4_dpia_count = 4;
}
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 41f8ec99da6b..901436591ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn31_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
struct dcn31_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index e3b5a95e03b1..702c28c2560e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -13,31 +13,6 @@
DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index b384f30395d3..06d8638db696 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -67,8 +67,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
- DIG_FIFO_READ_START_LEVEL, 0);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
static void enc314_dp_set_odm_combine(
@@ -317,6 +316,7 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the FIFO first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 755c715ad8dc..f4d1b83979fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,7 +343,9 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
+ bool two_pix_per_container = false;
+ two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (is_dp_128b_132b_signal(pipe_ctx)) {
@@ -355,16 +357,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
- } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- *k1_div = PIXEL_RATE_DIV_BY_2;
- *k2_div = PIXEL_RATE_DIV_BY_2;
} else {
- if (odm_combine_factor == 1)
- *k2_div = PIXEL_RATE_DIV_BY_4;
- else if (odm_combine_factor == 2)
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+ if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -374,3 +373,20 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
return odm_combine_factor;
}
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t pix_per_cycle = 1;
+ uint32_t odm_combine_factor = 1;
+
+ if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
+ pix_per_cycle = 2;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+ pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ pix_per_cycle);
+}
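For DP signals the divider selection above now keys off the container packing rather than the pixel encoding directly; a self-contained sketch of the resulting policy (PIX_DIV_* stand in for the PIXEL_RATE_DIV_BY_* enums, and two_pix_per_container corresponds to optc2_is_two_pixels_per_containter(), e.g. YCbCr 4:2:0 timings):

#include <stdbool.h>
#include <stdint.h>

enum pix_div { PIX_DIV_BY_1 = 1, PIX_DIV_BY_2 = 2, PIX_DIV_BY_4 = 4 };

static void dp_k1_k2(bool two_pix_per_container, uint32_t odm_combine_factor,
		     enum pix_div *k1, enum pix_div *k2)
{
	*k1 = PIX_DIV_BY_1;
	if (two_pix_per_container)
		*k2 = PIX_DIV_BY_2;
	else
		*k2 = (odm_combine_factor == 2) ? PIX_DIV_BY_2 : PIX_DIV_BY_4;
}

/* dcn314_set_pixels_per_cycle() follows the same inputs: */
static uint32_t pixels_per_cycle(bool two_pix_per_container, uint32_t odm_combine_factor)
{
	return (two_pix_per_container || odm_combine_factor > 1) ? 2 : 1;
}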
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index be0f5e4d48e1..244280298212 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -39,4 +39,6 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index b9debeb081fd..72a563a4c3e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -145,6 +145,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 0c7980266b85..38aa28ec6b13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
- REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 63861cdfb09f..44ac1c2aabf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -70,6 +70,7 @@
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -86,6 +87,9 @@
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
@@ -132,155 +136,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
#define DC_LOGGER_INIT(logger)
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 668,
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -602,6 +457,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
@@ -726,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
#define dsc_regsDCN314(id)\
[id] = {\
- DSC_REG_LIST_DCN314(id)\
+ DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
@@ -737,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
};
static const struct dcn20_dsc_shift dsc_shift = {
- DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
- DSC_REG_LIST_SH_MASK_DCN314(_MASK)
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
@@ -991,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
- .num_dsc = 4,
+ .num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
@@ -1402,7 +1258,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGF) {
+ if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
@@ -1447,7 +1303,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
- vpg_inst = hpo_dp_inst + 6;
+ // Uses offset index 5-8, which actually maps to VPG instances 6-9 (see mapping above)
+ vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
@@ -1793,109 +1650,16 @@ static struct clock_source *dcn31_clock_source_create(
return NULL;
}
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
- bool upscaled = false;
-
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
- if (pipe->plane_state &&
- (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
- pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
- upscaled = true;
-
- /*
- * Immediate flip can be set dynamically after enabling the plane.
- * We need to require support for immediate flip or underflow can be
- * intermittently experienced depending on peak b/w requirements.
- */
- pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
- pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.src.dcc_rate = 3;
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
-
- pipe_cnt++;
- }
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
- /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
- && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
- } else if (context->stream_count >= 3 && upscaled) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->stream)
- continue;
+ int pipe_cnt;
- if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
- pipe->stream->apply_seamless_boot_optimization) {
-
- if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
- context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
- break;
- }
- }
- }
+ DC_FP_START();
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ DC_FP_END();
return pipe_cnt;
}
@@ -1906,88 +1670,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
- if (bw_params->num_channels > 0)
- dcn3_14_soc.num_chans = bw_params->num_channels;
-
- ASSERT(dcn3_14_soc.num_chans);
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_14_soc.num_states - 1;
- }
-
- clock_tmp[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
- if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
- clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_14_soc.clock_limits[i] = clock_tmp[i];
- if (clk_table->num_entries)
- dcn3_14_soc.num_states = clk_table->num_entries;
- }
-
- if (max_dispclk_mhz) {
- dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ DC_FP_START();
+ dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2069,6 +1754,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
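The SoC/IP tables and the pipe-population loop removed above now live in dml/dcn314/dcn314_fpu.c, which is built with the hard-float dml_ccflags (see the DML Makefile hunk below); that is also why the per-file -mhard-float flags could leave the dcn314 Makefile. Callers outside the DML tree must bracket every FPU-touching call with DC_FP_START()/DC_FP_END(), DC's wrapper around the kernel's FPU save/restore. A minimal sketch of the convention, with some_dcn314_fpu_calc() hypothetical:

static void some_caller(struct dc *dc, struct clk_bw_params *bw_params)
{
	DC_FP_START();			/* FP registers now safe to use */
	some_dcn314_fpu_calc(dc, bw_params);
	DC_FP_END();			/* restore pre-call FPU state */
}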
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index c41108847ce0..0dd3153aa5c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -29,6 +29,9 @@
#include "core_types.h"
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
#define TO_DCN314_RES_POOL(pool)\
container_of(pool, struct dcn314_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index 39929fa67a51..22849eaa6f24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn315_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
struct dcn315_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 0dc5a6c13ae7..aba6d634131b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn316_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
struct dcn316_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index a31c64b50410..0d5e8a441512 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -225,19 +225,19 @@ void dccg32_set_dpstreamclk(
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 26648ce772da..38a48983f663 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
+ /* A read start level of 0 causes underflow/overflow and DIG_FIFO_ERROR = 1,
+ * so set it to 1/2 full (= 7) before reset, as suggested by the hardware team.
+ */
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 6ec1c52535b9..2038cbda33f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes(
enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
attr->width, attr->color_format);
+ // Round cursor width up to the next multiple of 64
+ uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+ uint32_t cursor_height = attr->height;
+ uint32_t cursor_size = cursor_width * cursor_height;
+
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes(
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
- if (attr->width * attr->height * 4 > 16384)
+ switch (attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
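Worked numbers for the sizing above: a 64x64 cursor in a 32-bit ARGB format is 64 * 64 * 4 = 16384 bytes, exactly at the threshold, and since the check is strictly greater-than it stays out of MALL; a 65-pixel-wide cursor pitch-aligns to 128, giving 128 * 64 * 4 = 32768 bytes, which sets USE_MALL_FOR_CURSOR. A self-contained sketch (FMT_* are stand-ins for the CURSOR_MODE_* enums):

#include <stdint.h>

enum cursor_fmt { FMT_MONO, FMT_32BPP, FMT_64BPP_FP };

static uint32_t cursor_size_bytes(uint32_t width, uint32_t height, enum cursor_fmt fmt)
{
	/* pitch-align width to the next multiple of 64 pixels */
	uint32_t pixels = ((width + 63) / 64) * 64 * height;

	switch (fmt) {
	case FMT_MONO:
		return pixels / 2;	/* two pixels per byte */
	case FMT_32BPP:
		return pixels * 4;
	default:
		return pixels * 8;	/* 64-bit FP formats */
	}
}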
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d38341f68b17..344fe7535df5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -250,6 +250,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
uint32_t num_ways = 0;
+ uint32_t prev_addr_low = 0;
for (i = 0; i < ctx->stream_count; i++) {
stream = ctx->streams[i];
@@ -267,10 +268,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];
// Calculate total surface size
- surface_size = plane->plane_size.surface_pitch *
+ if (prev_addr_low != plane->address.grph.addr.u.low_part) {
+ /* if the plane address differs from the previous FB, userspace allocated separate FBs */
+ surface_size += plane->plane_size.surface_pitch *
plane->plane_size.surface_size.height *
(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ prev_addr_low = plane->address.grph.addr.u.low_part;
+ } else {
+ /* We have the same fb for all the planes.
+ * Xorg always creates one giant fb that holds all surfaces,
+ * so allocating it once is sufficient.
+ */
+ continue;
+ }
// Convert surface size + starting address to number of cache lines required
// (alignment accounted for)
cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
@@ -284,24 +295,38 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
}
// Include cursor size for CAB allocation
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
- cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- break;
- }
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
+ if (pipe->stream && pipe->plane_state && hubp)
+ /* Find the cursor plane and use the exact size instead of
+ * using the max for calculation
+ */
+ if (hubp->curs_attr.width > 0) {
+ cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
+ break;
+ }
+ }
+
+ switch (stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
+ cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
plane->address.grph.cursor_cache_addr.quad_part);
}
}
@@ -314,13 +339,36 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ for (i = 0; i < ctx->stream_count; i++) {
+ stream = ctx->streams[i];
+ for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+ plane = ctx->stream_status[i].plane_states[j];
+
+ if (stream->cursor_position.enable && plane &&
+ !plane->address.grph.cursor_cache_addr.quad_part &&
+ cursor_size > 16384) {
+ /* Cursor caching is not supported since it won't be on the same line.
+ * So we need an extra line to accommodate it. With large cursors and a single 4K monitor
+ * this case triggers corruption. If we're at the edge, then don't trigger display refresh
+ * from MALL. We only need to cache the cursor if it's greater than 64x64 at 4 bpp.
+ */
+ num_ways++;
+ /* We only expect one cursor plane */
+ break;
+ }
+ }
+ }
+
return num_ways;
}
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- uint8_t ways;
+ uint8_t ways, i;
+ int j;
+ bool stereo_in_use = false;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -349,7 +397,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
* and configure HUBP's to fetch from MALL
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- if (ways <= dc->caps.cache_num_ways) {
+
+ /* MALL not supported with Stereo3D. If any plane is using stereo,
+ * don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
+
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ stereo_in_use = true;
+ break;
+ }
+ }
+ if (stereo_in_use)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -677,15 +741,39 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+ // Round cursor width up to the next multiple of 64
+ int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+ int cursor_height = hubp->curs_attr.height;
+ int cursor_size = cursor_width * cursor_height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
cache_cursor = true;
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
+ // MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
- pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
cache_cursor);
}
}
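Taken together, the CAB accounting above reduces to: sum the cache lines for each distinct framebuffer plus the (exact, no longer worst-case) cursor, divide by the lines in one cache way, round up, and reserve one extra way when an oversized cursor cannot be cached; independently, any plane with PLN_ADDR_TYPE_GRPH_STEREO vetoes MALL entirely. A self-contained sketch of the ways math (helper hypothetical):

#include <stdbool.h>
#include <stdint.h>

static uint32_t cab_ways(uint32_t cache_lines_used, uint32_t lines_per_way,
			 bool oversized_uncached_cursor)
{
	uint32_t num_ways = cache_lines_used / lines_per_way;

	if (cache_lines_used % lines_per_way)
		num_ways++;		/* round up to whole ways */
	if (oversized_uncached_cursor)
		num_ways++;		/* cursor won't share a line */
	return num_ways;
}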
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index eff1f4e17689..1fad7b48bd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -281,7 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+ .set_drr = optc32_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9a26d24b579f..c3b783cea8a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -867,10 +867,11 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -2039,7 +2040,8 @@ static bool dcn32_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
@@ -2051,6 +2053,7 @@ static bool dcn32_resource_construct(
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 1e7e6201c880..cf15d0e5e9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -30,6 +30,9 @@
#define DCN3_2_DET_SEG_SIZE 64
#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b3f8503cea9c..1f195c5b3377 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -46,7 +46,6 @@
uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
{
uint32_t num_ways = 0;
- uint32_t mall_region_pixels = 0;
uint32_t bytes_per_pixel = 0;
uint32_t cache_lines_used = 0;
uint32_t lines_per_way = 0;
@@ -54,20 +53,64 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
uint32_t bytes_in_mall = 0;
uint32_t num_mblks = 0;
uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0;
+ uint32_t i = 0, j = 0;
+ uint32_t mblk_width = 0;
+ uint32_t mblk_height = 0;
+ uint32_t full_vp_width_blk_aligned = 0;
+ uint32_t full_vp_height_blk_aligned = 0;
+ uint32_t mall_alloc_width_blk_aligned = 0;
+ uint32_t mall_alloc_height_blk_aligned = 0;
+ uint32_t full_vp_height = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Find the phantom pipes
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+ struct pipe_ctx *main_pipe = NULL;
- // For bytes required in MALL, calculate based on number of MBlks required
- num_mblks = (mall_region_pixels * bytes_per_pixel +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+ /* Get full viewport height from main pipe (required for MBLK calculation) */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ main_pipe = &context->res_ctx.pipe_ctx[j];
+ if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+ full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+ break;
+ }
+ }
+
+ bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ */
+ full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ */
+ full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+ /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+ mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
+
+ /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+ mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+ mblk_height * mblk_height + mblk_height;
+
+ /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+ * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+ * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+ * (Should be divisible, but round up if not)
+ */
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
// (MALL is 64-byte aligned)
@@ -144,7 +187,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
- continue;
+ return false;
if (!pipe->plane_state)
return false;
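The FLOOR(x, blk) in the comments above rounds down to a multiple of blk, so the aligned span is the distance between the rounded-up end and the rounded-down start. For viewport.x = 200, width = 1920, mblk_width = 128: FLOOR(200 + 1920 + 127, 128) - FLOOR(200, 128) = 2176 - 128 = 2048 pixels, i.e. 16 MBLK columns. A self-contained check of the formula:

#include <stdint.h>

/* FLOOR(start + len + blk - 1, blk) - FLOOR(start, blk) */
static uint32_t blk_aligned_span(uint32_t start, uint32_t len, uint32_t blk)
{
	return ((start + len + blk - 1) / blk * blk) - (start / blk * blk);
}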
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 8157e40d2c7e..7309eed33a61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -868,10 +868,11 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1651,7 +1652,8 @@ static bool dcn321_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
@@ -1662,8 +1664,9 @@ static bool dcn321_resource_construct(
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
-
+ dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 359f6e9a1da0..cb81ed2fbd53 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +84,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -124,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
DML += dcn31/dcn31_fpu.o
DML += dcn32/dcn32_fpu.o
@@ -131,6 +133,7 @@ DML += dcn321/dcn321_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ca44df4fca74..d34e0f1314d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,6 +30,7 @@
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "dcn20_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 876b321b30ca..1cb858dd6ea0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -6610,8 +6610,7 @@ static double CalculateUrgentLatency(
return ret;
}
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 7ef66e511ec8..d211cf6d234c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -26,6 +26,7 @@
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index e36cfa5985ea..fa7b0291ce4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -25,6 +25,9 @@
#include "resource.h"
#include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@@ -288,6 +291,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -355,7 +359,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -677,7 +681,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ if (bw_params->num_channels > 0)
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
ASSERT(clk_table->num_entries);
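A zero from the clock manager means "not reported", so the patch only overrides the static dcn3_15_soc defaults (including the new .num_chans = 4) when real data arrives. The idiom, as a hypothetical self-contained helper:

static void override_if_reported(unsigned int *field, unsigned int reported)
{
	if (reported > 0)	/* zero means "not provided"; keep the default */
		*field = reported;
}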
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 3fab19134480..8ca66f1644dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -26,7 +26,7 @@
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
@@ -251,33 +251,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
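The hunks above and below all apply one refactor: CalculateFlipSchedule no longer takes one scalar (or output pointer) per per-plane field, it takes the plane index k and reads/writes mode_lib->vba directly. A minimal sketch of the pattern, using hypothetical names (shared_state, MAX_PLANES, calculate_flip) rather than the real vba_vars_st:

#define MAX_PLANES 8

/* Hypothetical stand-in for vba_vars_st: per-plane fields become arrays. */
struct shared_state {
	int GPUVMEnable;
	int DCCEnable[MAX_PLANES];
	double final_flip_bw[MAX_PLANES];
};

/* After the refactor: k selects the plane and outputs land in *v,
 * so call sites shrink from ~25 arguments to a handful. */
static void calculate_flip(struct shared_state *v, unsigned int k,
			   double row_bytes, double line_time)
{
	if (v->GPUVMEnable || v->DCCEnable[k])
		v->final_flip_bw[k] = row_bytes / line_time;
	else
		v->final_flip_bw[k] = 0.0;
}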
@@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 66b82e4f05c6..35d10b4d018b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -27,7 +27,7 @@
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644
index 000000000000..4bb3b31ea7e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ /* TODO: correct dispclk/dppclk voltage level determination */
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dcn3_14_soc.clock_limits;
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+ dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
+ if (bw_params->num_channels > 0)
+ dcn3_14_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(dcn3_14_soc.num_chans);
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /* SMU gives one DPM level, so take the highest one */
+ closest_clk_lvl = dcn3_14_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /* SMU fix not released yet */
+ clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_14_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_14_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
+ else
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
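The dc_assert_fp_enabled() check above implies callers must enter kernel-FPU context first. A plausible call-site sketch (the wrapper name and where it lives in the resource code are assumptions; DC_FP_START()/DC_FP_END() are the guards DC uses for this elsewhere):

static void dcn314_update_bw_bounding_box(struct dc *dc,
					  struct clk_bw_params *bw_params)
{
	/* Enter protected-FPU context before calling the *_fpu() helper. */
	DC_FP_START();
	dcn314_update_bw_bounding_box_fpu(dc, bw_params);
	DC_FP_END();
}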
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ dc_assert_fp_enabled();
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+ && pipe->stream->adjust.v_total_min > timing->v_total)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after the plane is enabled,
+ * so immediate-flip support must be required up front; otherwise
+ * underflow can be hit intermittently depending on peak b/w demand.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid a forced pipe split when there is not enough detile (DET) buffer for the swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
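The DET allocation at the tail of this function follows a small fixed policy. A compact, simplified restatement (the helper name and collapsed conditions are mine; the real code also keys off crb_alloc_policy_min_disp_count and the single-pipe 5k case above):

/* Returns the DET size per pipe in kB; 64 is the CRB segment size. */
static int det_size_kb_sketch(int stream_count, int upscaled,
			      int crb_alloc_policy)
{
	if (crb_alloc_policy > 0 /* i.e. above DET_SIZE_DEFAULT */)
		return crb_alloc_policy * 64;
	if (stream_count >= 3 && upscaled)
		return 192;
	return 384; /* DCN3_14_DEFAULT_DET_SIZE */
}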
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644
index 000000000000..d32c5bb99f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+#endif
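A quick consistency check on these constants, under the usual DCN assumption that DET and the compressed buffer are carved out of the 1792 kB config return buffer declared in dcn3_14_ip (the assumption, not the header, is mine):

/* Four pipes at the default DET size must still leave the minimum
 * compressed-buffer space inside the 1792 kB config return buffer. */
_Static_assert(4 * 384 + 128 <= 1792,
	       "default DET split would starve the compbuf");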
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index fc4d7474c111..ee821c4fb5dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -61,7 +61,7 @@
// fudge factor for min dcfclk calculation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
-struct {
+typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
@@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1599,7 +1543,7 @@ static void CalculateDCCConfiguration(
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
- enum {
+ typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
@@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
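Restating the accept/reject test evaluated just above as a self-contained predicate (the shorthand parameter names are mine, not DML's):

#include <stdbool.h>

/* Immediate flip is viable only if both line-request budgets hold and
 * one PTE fetch plus two row fetches fit within the smallest row time. */
static bool immediate_flip_ok(double vm_lines, double row_lines,
			      double t_meta_pte, double t_row,
			      double min_row_time)
{
	return !(vm_lines >= 32.0 || row_lines >= 16.0 ||
		 t_meta_pte + 2.0 * t_row > min_row_time);
}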
@@ -4071,9 +3941,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
- || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
- || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+ if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
@@ -5414,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5498,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5681,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5758,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5864,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5879,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5891,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
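The classification above reduces to a three-way decision on the computed
margins. A minimal standalone sketch of the same logic (illustrative only,
not the kernel code; names mirror the VBA variables):

	#include <stdbool.h>

	enum clock_change_support {
		dm_dram_clock_change_vactive,
		dm_dram_clock_change_vblank,
		dm_dram_clock_change_unsupported,
	};

	/* VACTIVE switching needs a positive minimum margin in prefetch
	 * mode 0; VBLANK switching additionally accepts synchronized
	 * timings, a single active OTG, or a positive second-smallest
	 * margin. Everything else is unsupported.
	 */
	static enum clock_change_support classify_dram_clock_change(
			double min_margin, bool synchronized_vblank,
			int total_active_otg, double second_min_margin,
			unsigned int prefetch_mode)
	{
		if (min_margin > 0 && prefetch_mode == 0)
			return dm_dram_clock_change_vactive;
		if ((synchronized_vblank || total_active_otg == 1 ||
		     second_min_margin > 0) && prefetch_mode == 0)
			return dm_dram_clock_change_vblank;
		return dm_dram_clock_change_unsupported;
	}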
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -7157,12 +6933,13 @@ static double CalculateExtraLatencyBytes(
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
- else
+ } else {
HostVMDynamicLevels = 0;
+ }
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
- if (GPUVMEnable == true)
+ if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k)
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
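For reference, the byte count assembled by CalculateExtraLatencyBytes after
the brace fix above is: reordering bytes, plus pixel/meta chunk traffic per
DPP, plus (when GPUVM is enabled) dpte-group traffic inflated by the HostVM
dynamic levels. A self-contained sketch under those assumptions (parameter
names are illustrative, not the kernel signature):

	#include <stdbool.h>

	static double extra_latency_bytes(double reordering_bytes,
			int total_dpp, int total_dcc_dpp,
			double pixel_chunk_kbyte, double meta_chunk_kbyte,
			bool gpuvm_enable, int hostvm_dynamic_levels,
			double hostvm_inefficiency, int num_planes,
			const int num_dpp[], const unsigned int dpte_group_bytes[])
	{
		/* Base cost: reordering plus chunk traffic per DPP. */
		double ret = reordering_bytes +
			(total_dpp * pixel_chunk_kbyte +
			 total_dcc_dpp * meta_chunk_kbyte) * 1024.0;
		int k;

		/* GPUVM adds dpte-group traffic per plane, scaled by the
		 * HostVM dynamic page-table levels and inefficiency factor.
		 */
		if (gpuvm_enable) {
			for (k = 0; k < num_planes; ++k)
				ret += num_dpp[k] * dpte_group_bytes[k] *
				       (1 + 8 * hostvm_dynamic_levels) *
				       hostvm_inefficiency;
		}
		return ret;
	}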
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 66453546e24f..f43686997917 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -244,6 +244,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
}
/**
+ * dcn32_find_dummy_latency_index_for_fw_based_mclk_switch - Find
+ * dummy_latency_index when MCLK switching using firmware-based vblank
+ * stretch is enabled. This function iterates through the table of dummy
+ * p-state latencies until the lowest value that allows
+ * dm_allow_self_refresh_and_mclk_switch to happen is found.
+ */
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ const int max_latency_table_entries = 4;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ int dummy_latency_index = 0;
+
+ dc_assert_fp_enabled();
+
+ while (dummy_latency_index < max_latency_table_entries) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+
+ if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
+ break;
+
+ dummy_latency_index++;
+ }
+
+ if (dummy_latency_index == max_latency_table_entries) {
+ ASSERT(dummy_latency_index != max_latency_table_entries);
+ /* If execution gets here, it means dummy p-states are
+ * not possible. This should never happen and would mean
+ * something is severely wrong.
+ * Reset dummy_latency_index to the last table entry (3),
+ * because underflows are preferable to system crashes.
+ */
+ dummy_latency_index = max_latency_table_entries - 1;
+ }
+
+ return dummy_latency_index;
+}
+
+/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
*
@@ -473,8 +517,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
+ /* Add a 16-line margin to the MALL region because SUB_VP_START_LINE
+ * must be aligned to 2 swaths (i.e. 16 lines).
+ */
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
- pstate_width_fw_delay_lines;
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -490,6 +537,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
+ phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
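The phantom timing math above is simple enough to check by hand. A minimal
sketch (hypothetical helper; inputs are the line counts used in the code
above):

	/* Phantom VACTIVE = subviewport lines + firmware delay + the new
	 * 16-line swath-alignment margin; the main pipe's vstartup is
	 * reused as the phantom back porch.
	 */
	static unsigned int phantom_v_total(unsigned int subviewport_lines,
			unsigned int fw_delay_lines,
			unsigned int swath_margin_lines,
			unsigned int main_pipe_vstartup,
			unsigned int v_front_porch, unsigned int v_sync_width)
	{
		unsigned int phantom_vactive = subviewport_lines +
			fw_delay_lines + swath_margin_lines;
		unsigned int phantom_bp = main_pipe_vstartup;

		return phantom_vactive + v_front_porch + v_sync_width +
			phantom_bp;
	}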
/**
@@ -983,9 +1031,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
+ * Override present for testing.
*/
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ if (dc->debug.dml_disallow_alternate_prefetch_modes)
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
+ else
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (*vlevel < context->bw_ctx.dml.soc.num_states)
@@ -1004,6 +1058,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
+ // to re-initialize viewport after the pipe merge
+ for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1014,7 +1077,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
* enough to support MCLK switching.
*/
- if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+ if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+ dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_stutter;
/* There are params (such as FabricClock) that need to be recalculated
@@ -1344,7 +1409,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1373,17 +1439,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
DC_FP_END();
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
*
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
- * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favoring voltage.
+ *
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_fclk_and_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1619,7 +1690,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+ dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn32_find_dummy_latency_index_for_fw_based_mclk_switch
@@ -2098,6 +2169,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
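The new fclk override follows the same pattern as the neighboring dram and
dummy-pstate overrides: a non-zero nanosecond override replaces the SoC
default only when it differs from the current microsecond value. A sketch of
that guard as a hypothetical helper (not a kernel API):

	static void apply_latency_override_us(double *soc_latency_us,
					      int override_ns)
	{
		/* Only override when set and actually different. */
		if ((int)(*soc_latency_us * 1000) != override_ns && override_ns)
			*soc_latency_us = override_ns / 1000.0;
	}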
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 3ed06ab855be..6ce221098979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -71,4 +71,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
#endif
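A condensed view of how the new declaration is consumed (mirroring the call
site updated in dcn32_calculate_wm_and_dlg_fpu above; surrounding code
elided):

	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
		int dummy_latency_index =
			dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(
				dc, context, pipes, pipe_cnt, vlevel);
		/* ... re-run validation with the chosen dummy latency ... */
	}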
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 890612db08dc..6980f698eb23 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
@@ -758,31 +756,17 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
+ v,
+ k,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
- &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
- mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
+ &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+ v->DSCDelay[k],
+ (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
- mode_lib->vba.TCalc,
+ v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
@@ -796,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.SwathHeightC[k],
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
TWait,
/* Output */
&v->DSTXAfterScaler[k],
@@ -1167,59 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLK,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- v->dpte_group_bytes,
- v->meta_row_height,
- v->meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+ v->DCFCLK,
+ v->ReturnBW,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLK,
+ v->SOCCLK,
v->DCFCLKDeepSleep,
- mode_lib->vba.DETBufferSizeY,
- mode_lib->vba.DETBufferSizeC,
- mode_lib->vba.SwathHeightY,
- mode_lib->vba.SwathHeightC,
- mode_lib->vba.LBBitPerPixel,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.DPPPerPlane,
+ v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
/* Output */
- &v->Watermark,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
v->MaxActiveDRAMClockChangeLatencySupported,
v->SubViewportLinesNeededInMALL,
@@ -1811,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.Read256BlockHeightC[k],
&mode_lib->vba.Read256BlockWidthY[k],
&mode_lib->vba.Read256BlockWidthC[k],
- &mode_lib->vba.MicroTileHeightY[k],
- &mode_lib->vba.MicroTileHeightC[k],
- &mode_lib->vba.MicroTileWidthY[k],
- &mode_lib->vba.MicroTileWidthC[k]);
+ &mode_lib->vba.MacroTileHeightY[k],
+ &mode_lib->vba.MacroTileHeightC[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MacroTileWidthC[k]);
}
/*Bandwidth Support Check*/
@@ -1952,7 +1905,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2040,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2061,6 +2014,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2549,7 +2503,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2666,10 +2619,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
- mode_lib->vba.MicroTileWidthY,
- mode_lib->vba.MicroTileWidthC,
- mode_lib->vba.MicroTileHeightY,
- mode_lib->vba.MicroTileHeightC,
+ mode_lib->vba.MacroTileWidthY,
+ mode_lib->vba.MacroTileWidthC,
+ mode_lib->vba.MacroTileHeightY,
+ mode_lib->vba.MacroTileHeightC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
@@ -2716,10 +2669,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -2749,7 +2702,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,64 +3218,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
+ v,
+ k,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
- mode_lib->vba.DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal +
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- mode_lib->vba.SwathWidthYThisState[k] /
- mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup,
- mode_lib->vba.MaximumVStartup[i][j][k]),
- mode_lib->vba.MaximumVStartup[i][j][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.UrgLatency[i],
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
- mode_lib->vba.MetaRowBytes[i][j][k],
- mode_lib->vba.DPTEBytesPerRow[i][j][k],
- mode_lib->vba.PrefetchLinesY[i][j][k],
- mode_lib->vba.SwathWidthYThisState[k],
- mode_lib->vba.PrefillY[k],
- mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[i][j][k],
- mode_lib->vba.SwathWidthCThisState[k],
- mode_lib->vba.PrefillC[k],
- mode_lib->vba.MaxNumSwC[k],
- mode_lib->vba.swath_width_luma_ub_this_state[k],
- mode_lib->vba.swath_width_chroma_ub_this_state[k],
- mode_lib->vba.SwathHeightYThisState[k],
- mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+ v->DSCDelayPerState[i][k],
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
- &mode_lib->vba.LineTimesForPrefetch[k],
- &mode_lib->vba.PrefetchBW[k],
- &mode_lib->vba.LinesForMetaPTE[k],
- &mode_lib->vba.LinesForMetaAndDPTERow[k],
- &mode_lib->vba.VRatioPreY[i][j][k],
- &mode_lib->vba.VRatioPreC[i][j][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
- &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.prefetch_vmrow_bw[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+ &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup
@@ -3566,63 +3501,32 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[i][j],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLKState[i][j],
- mode_lib->vba.ReturnBWPerState[i][j],
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- mode_lib->vba.dpte_group_bytes,
- mode_lib->vba.meta_row_height,
- mode_lib->vba.meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[i][j],
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLKPerState[i],
- mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
- mode_lib->vba.DETBufferSizeYThisState,
- mode_lib->vba.DETBufferSizeCThisState,
- mode_lib->vba.SwathHeightYThisState,
- mode_lib->vba.SwathHeightCThisState,
- mode_lib->vba.LBBitPerPixel,
- mode_lib->vba.SwathWidthYThisState, // 24
- mode_lib->vba.SwathWidthCThisState,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.NoOfDPPThisState,
- mode_lib->vba.BytePerPixelInDETY,
- mode_lib->vba.BytePerPixelInDETC,
+ v->SOCCLKPerState[i],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->SwathWidthYThisState, // 24
+ v->SwathWidthCThisState,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
- mode_lib->vba.UnboundedRequestEnabledThisState,
- mode_lib->vba.CompressedBufferSizeInkByteThisState,
+ v->UnboundedRequestEnabledThisState,
+ v->CompressedBufferSizeInkByteThisState,
/* Output */
- &mode_lib->vba.Watermark, // Store the values in vba
- &mode_lib->vba.DRAMClockChangeSupport[i][j],
+ &v->DRAMClockChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
- &mode_lib->vba.FCLKChangeSupport[i][j],
+ &v->FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 07f8f3b8626b..365d290bba99 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -27,6 +27,8 @@
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
+#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
+
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
@@ -391,7 +393,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
} // CalculateBytePerPixelAndBlockSizes
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +457,18 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpSwathSizeBytesY;
+ unsigned int RoundedUpSwathSizeBytesC;
+ double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
-
- st_vars->TotalActiveDPP = 0;
- st_vars->NoChromaSurfaces = true;
+ unsigned int TotalActiveDPP = 0;
+ bool NoChromaSurfaces = true;
+ unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +503,43 @@ void dml32_CalculateSwathAndDETConfiguration(
DPPPerSurface,
/* Output */
- st_vars->SwathWidthdoubleDPP,
- st_vars->SwathWidthdoubleDPPChroma,
+ SwathWidthdoubleDPP,
+ SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
- st_vars->MaximumSwathHeightY,
- st_vars->MaximumSwathHeightC,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+ RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+ RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+ RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+ RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+ RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+ TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
- st_vars->NoChromaSurfaces = false;
+ NoChromaSurfaces = false;
}
}
@@ -540,10 +549,10 @@ void dml32_CalculateSwathAndDETConfiguration(
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
// - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
- *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+ *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
- *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+ *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
#ifdef __DML_VBA_DEBUG__
@@ -551,7 +560,7 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
- *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+ *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
@@ -566,8 +575,8 @@ void dml32_CalculateSwathAndDETConfiguration(
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
- st_vars->RoundedUpMaxSwathSizeBytesY,
- st_vars->RoundedUpMaxSwathSizeBytesC,
+ RoundedUpMaxSwathSizeBytesY,
+ RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
@@ -575,7 +584,7 @@ void dml32_CalculateSwathAndDETConfiguration(
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+ dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +595,42 @@ void dml32_CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+ DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
- st_vars->DETBufferSizeInKByteForSwathCalculation);
+ DETBufferSizeInKByteForSwathCalculation);
#endif
- if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
- if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+ if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
@@ -636,7 +645,7 @@ void dml32_CalculateSwathAndDETConfiguration(
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
- } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
@@ -654,11 +663,11 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+ k, RoundedUpMaxSwathSizeBytesC[k]);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -1175,6 +1184,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -1246,6 +1256,29 @@ void dml32_CalculateODMMode(
else
*TotalAvailablePipesSupport = false;
}
+ if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
+ ODMUse != dm_odm_combine_policy_4to1) {
+ if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
+ *ODMMode == dm_odm_combine_mode_4to1) {
+ *ODMMode = dm_odm_combine_mode_4to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
+ *NumberOfDPP = 4;
+ } else {
+ *ODMMode = dm_odm_combine_mode_2to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
+ *NumberOfDPP = 2;
+ }
+ }
+ if (Output == dm_hdmi && OutFormat == dm_420 &&
+ HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ }
}
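The new 4:2:0 clamp encodes a small decision table: each pipe's 420
formatter buffer is DCN32_MAX_FMT_420_BUFFER_WIDTH (4096) pixels wide, so
wider timings must be split across pipes with ODM, and HDMI, which cannot
use ODM combine here, is rejected outright. A condensed, illustrative
restatement:

	#include <stdbool.h>

	#define MAX_FMT_420_W 4096 /* stands in for DCN32_MAX_FMT_420_BUFFER_WIDTH */

	/* Returns the minimum ODM split (1, 2 or 4) a 4:2:0 timing needs,
	 * or 0 when the mode cannot be supported at all.
	 */
	static int min_odm_split_for_420(unsigned int h_active, bool is_hdmi)
	{
		if (h_active <= MAX_FMT_420_W)
			return 1;                    /* fits one buffer */
		if (is_hdmi || h_active > MAX_FMT_420_W * 4)
			return 0;                    /* unsupported */
		return h_active > MAX_FMT_420_W * 2 ? 4 : 2;
	}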
double dml32_CalculateRequiredDispclk(
@@ -1867,7 +1900,6 @@ void dml32_CalculateSurfaceSizeInMall(
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1965,21 @@ void dml32_CalculateVMRowAndSwath(
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
+ unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+ unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+ bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
@@ -1954,15 +2001,15 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
- st_vars->PTEBufferSizeInRequestsForLuma[k] =
+ PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
- st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -1982,21 +2029,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForChroma[k],
+ PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
- &st_vars->MetaRowByteC[k],
- &st_vars->PixelPTEBytesPerRowC[k],
+ &MetaRowByteC[k],
+ &PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
- &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
- &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowC_one_row_per_frame[k],
+ &dpte_row_width_chroma_ub_one_row_per_frame[k],
+ &dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
@@ -2024,19 +2071,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
- st_vars->PixelPTEBytesPerRowC[k] = 0;
- st_vars->PDEAndMetaPTEBytesFrameC = 0;
- st_vars->MetaRowByteC[k] = 0;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForChroma[k] = 0;
+ PixelPTEBytesPerRowC[k] = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
- st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+ dpte_row_height_chroma_one_row_per_frame[k] = 0;
+ dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+ PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
- st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -2056,21 +2103,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForLuma[k],
+ PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
- &st_vars->MetaRowByteY[k],
- &st_vars->PixelPTEBytesPerRowY[k],
+ &MetaRowByteY[k],
+ &PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
- &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
- &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowY_one_row_per_frame[k],
+ &dpte_row_width_luma_ub_one_row_per_frame[k],
+ &dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
@@ -2098,19 +2145,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillY[k],
&MaxNumSwathY[k]);
- PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
- MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+ PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+ MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
- if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+ if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
- st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
- st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+ one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+ PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2165,7 @@ void dml32_CalculateVMRowAndSwath(
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
- st_vars->one_row_per_frame_fits_in_buffer,
+ one_row_per_frame_fits_in_buffer,
/* Output */
UsesMALLForStaticScreen); // boolean
@@ -2144,13 +2191,13 @@ void dml32_CalculateVMRowAndSwath(
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
- dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
- dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
- dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
- dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
- PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+ dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+ dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+ dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+ dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+ PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2205,7 @@ void dml32_CalculateVMRowAndSwath(
else
DCCMetaBufferSizeNotExceeded[k] = false;
- PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+ PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
@@ -2169,11 +2216,11 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
- st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+ MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
- st_vars->PixelPTEBytesPerRowY[k],
- st_vars->PixelPTEBytesPerRowC[k],
+ PixelPTEBytesPerRowY[k],
+ PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
@@ -2189,12 +2236,12 @@ void dml32_CalculateVMRowAndSwath(
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
@@ -3342,29 +3389,14 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
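/*
 * Editorial sketch, not part of the patch: the hunks below replace the
 * caller-provided scratch struct (struct dml32_CalculatePrefetchSchedule
 * *st_vars) with ordinary function locals, and fold much of the scalar
 * parameter list into the shared state pointer (struct vba_vars_st *v)
 * plus a plane index k. The helper below is hypothetical (vba_like,
 * prefetch_scratch and calc_tvm_trips_* are invented names) and only
 * illustrates the shape of that refactor, assuming plain C99.
 */
#include <stdbool.h>

struct vba_like {				/* stand-in for struct vba_vars_st */
	bool GPUVMEnable;
	unsigned int GPUVMMaxPageTableLevels;
};

/* Before: every intermediate lived in a preallocated scratch struct. */
struct prefetch_scratch {
	double LineTime;
	double Tvm_trips;
};

static double calc_tvm_trips_old(struct prefetch_scratch *st_vars,
				 bool gpuvm_enable, unsigned int levels,
				 double htotal, double pixel_clock,
				 double urgent_extra)
{
	st_vars->LineTime = htotal / pixel_clock;
	st_vars->Tvm_trips = gpuvm_enable ?
			urgent_extra * levels : st_vars->LineTime / 4.0;
	return st_vars->Tvm_trips;
}

/* After: plain stack locals; shared knobs come from *v, indexed by k. */
static double calc_tvm_trips_new(const struct vba_like *v, unsigned int k,
				 double htotal, double pixel_clock,
				 double urgent_extra)
{
	double LineTime = htotal / pixel_clock;
	double Tvm_trips;

	(void)k;	/* the real code indexes per-plane arrays in *v with k */
	Tvm_trips = v->GPUVMEnable ?
			urgent_extra * v->GPUVMMaxPageTableLevels : LineTime / 4.0;
	return Tvm_trips;
}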
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -3405,72 +3437,100 @@ bool dml32_CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
-
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
- st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
- st_vars->Tsw_est1 = 0;
- st_vars->Tsw_est3 = 0;
-
- if (GPUVMEnable == true && HostVMEnable == true)
- st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler;
+ double LineTime;
+ double dst_y_prefetch_equ;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double trip_to_mem;
+ double Tvm_trips;
+ double Tr0_trips;
+ double Tvm_trips_rounded;
+ double Tr0_trips_rounded;
+ double Lsw_oto;
+ double Tpre_rounded;
+ double prefetch_bw_equ;
+ double Tvm_equ;
+ double Tr0_equ;
+ double Tdmbf;
+ double Tdmec;
+ double Tdmsks;
+ double prefetch_sw_bytes;
+ double bytes_pp;
+ double dep_bytes;
+ unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ double min_Lsw;
+ double Tsw_est1 = 0;
+ double Tsw_est3 = 0;
+
+ if (v->GPUVMEnable == true && v->HostVMEnable == true)
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
else
- st_vars->HostVMDynamicLevelsTrips = 0;
+ HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
- dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+ dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+ dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
- dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
- __func__, HostVMEnable, HostVMInefficiencyFactor);
+ dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+ __func__, v->HostVMEnable, HostVMInefficiencyFactor);
#endif
dml32_CalculateVUpdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->Dppclk,
myPipe->Dispclk,
myPipe->DCFClkDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
/* output */
- &st_vars->Tdmbf,
- &st_vars->Tdmec,
- &st_vars->Tdmsks,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
- st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
- st_vars->trip_to_mem = UrgentLatency;
- st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true)
- *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+ if (v->DynamicMetadataVMEnabled == true)
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
#ifdef __DML_VBA_ALLOW_DELTA__
- if (DynamicMetadataEnable == false)
+ if (v->DynamicMetadataEnable[k] == false)
*Tdmdl = 0.0;
#endif
- if (DynamicMetadataEnable == true) {
- if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+ if (v->DynamicMetadataEnable[k] == true) {
+ if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
- __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
- __func__, st_vars->Tdmsks);
+ __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
@@ -3481,22 +3541,22 @@ bool dml32_CalculatePrefetchSchedule(
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+ *Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+ v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
- *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3566,10 @@ bool dml32_CalculatePrefetchSchedule(
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+ dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
- dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+ dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
@@ -3517,14 +3577,14 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
#endif
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
- st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3592,132 @@ bool dml32_CalculatePrefetchSchedule(
MyError = false;
- st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
-
- if (GPUVMEnable == true) {
- st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
- } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
- 4.0 * st_vars->LineTime; // VBA_ERROR
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
+
+ if (v->GPUVMEnable == true) {
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
+ if (v->GPUVMMaxPageTableLevels >= 3) {
+ *Tno_bw = UrgentExtraLatency + trip_to_mem *
+ (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+ } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
+ Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+ 4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
- st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
- st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+ Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+ Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
- st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
- st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
-
- st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
- st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
- st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
-
- if (GPUVMEnable == true) {
- st_vars->Tvm_oto = dml_max3(
- st_vars->Tvm_trips,
- *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
- st_vars->LineTime / 4.0);
+ prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+ prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
+
+ min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+ min_Lsw = dml_max(min_Lsw, 1.0);
+ Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
+
+ if (v->GPUVMEnable == true) {
+ Tvm_oto = dml_max3(
+ Tvm_trips,
+ *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ LineTime / 4.0);
} else
- st_vars->Tvm_oto = st_vars->LineTime / 4.0;
-
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_oto = dml_max4(
- st_vars->Tr0_trips,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
- (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
- st_vars->LineTime / 4.0);
+ Tvm_oto = LineTime / 4.0;
+
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ Tr0_oto = dml_max4(
+ Tr0_trips,
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ (LineTime - Tvm_oto)/2.0,
+ LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
- dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+ dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
- st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
- st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
- st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+ dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
- dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+ dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
- dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+ dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
- dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
- dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+ dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+ dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
- dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
- dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
- dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
- dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
- dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
- dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
- dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+ dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+ dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+ dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+ dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+ dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+ dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+ dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+ dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
- st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
- dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+ dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
- __func__, VStartup * st_vars->LineTime);
+ __func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
- dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
- st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+ dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
- if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
- st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+ if (prefetch_sw_bytes < dep_bytes)
+ prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -3665,61 +3725,61 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (st_vars->dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
- if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+ if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
- st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+ + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+ Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
- && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+ if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+ && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
- if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
- PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
- (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+ if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
- st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+ + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+ Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
- (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
- st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+ (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+ LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
- PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
- (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+ PrefetchBandwidth4 = prefetch_sw_bytes /
+ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+ dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
- dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
- dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
- dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+ dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+ dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+ dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3792,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
@@ -3745,9 +3805,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
@@ -3758,9 +3818,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
- st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+ Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
- st_vars->Tr0_trips_rounded) {
+ Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
@@ -3770,80 +3830,80 @@ bool dml32_CalculatePrefetchSchedule(
}
if (Case1OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+ prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+ prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+ prefetch_bw_equ = PrefetchBandwidth3;
else
- st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+ prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
- dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+ dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
- if (st_vars->prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
- st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
- HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
- st_vars->Tvm_trips, st_vars->LineTime / 4);
+ if (prefetch_bw_equ > 0) {
+ if (v->GPUVMEnable == true) {
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor / prefetch_bw_equ,
+ Tvm_trips, LineTime / 4);
} else {
- st_vars->Tvm_equ = st_vars->LineTime / 4;
+ Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
- HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
- (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+ (LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
- st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
- st_vars->Tvm_equ = 0;
- st_vars->Tr0_equ = 0;
+ Tvm_equ = 0;
+ Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
- if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
- *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
} else {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
- *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
}
- *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
- dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+ dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
- dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+ dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
- if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
- *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+ if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3911,12 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
- st_vars->LinesToRequestPrefetchPixelData,
+ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
@@ -3870,7 +3930,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
}
- *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3939,11 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
@@ -3898,25 +3958,25 @@ bool dml32_CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
- / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
- / st_vars->LineTime;
+ / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+ / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
- st_vars->LinesToRequestPrefetchPixelData
+ LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
- * swath_width_chroma_ub / st_vars->LineTime;
+ * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
- __func__, st_vars->LinesToRequestPrefetchPixelData);
+ __func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
@@ -3925,15 +3985,15 @@ bool dml32_CalculatePrefetchSchedule(
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
- (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
- 2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
- dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+ (double)LinesToRequestPrefetchPixelData * LineTime +
+ 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+ (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
- st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
- ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+ TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+ ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
@@ -3941,7 +4001,7 @@ bool dml32_CalculatePrefetchSchedule(
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
- __func__, st_vars->dst_y_prefetch_equ);
+ __func__, dst_y_prefetch_equ);
#endif
}
@@ -3957,10 +4017,10 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
- (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -3977,7 +4037,7 @@ bool dml32_CalculatePrefetchSchedule(
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
- (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4060,12 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
+ LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -4159,59 +4219,28 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
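/*
 * Editorial sketch, not part of the patch: the watermark routine below
 * now declares its per-plane temporaries as fixed-size stack arrays
 * (sized DC__NUM_DPP__MAX) instead of scratch-struct fields, and reads
 * shared inputs such as v->HTotal[] and v->PixelClock[] through the
 * vba_vars_st pointer. A minimal, hypothetical example of that pattern
 * (MAX_PLANES and min_line_time_us are invented names):
 */
#define MAX_PLANES 8	/* stand-in for DC__NUM_DPP__MAX */

static double min_line_time_us(const double htotal[],
			       const double pixel_clock_mhz[],
			       unsigned int nplanes)
{
	double line_time[MAX_PLANES];	/* per-plane temporary on the stack */
	double min;
	unsigned int k;

	if (nplanes == 0 || nplanes > MAX_PLANES)
		return 0.0;

	for (k = 0; k < nplanes; k++)
		line_time[k] = htotal[k] / pixel_clock_mhz[k];	/* us when clock is in MHz */

	min = line_time[0];
	for (k = 1; k < nplanes; k++)
		if (line_time[k] < min)
			min = line_time[k];

	return min;
}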
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
@@ -4221,229 +4250,251 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
-
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
- st_vars->DRAMClockChangeSupportNumber = 0;
- st_vars->DRAMClockChangeMethod = 0;
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
- st_vars->MinActiveFCLKChangeMargin = 0.;
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
- st_vars->TotalPixelBW = 0.0;
- st_vars->TotalActiveWriteback = 0;
-
- Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
- Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+ unsigned int DRAMClockChangeSupportNumber = 0;
+ unsigned int LastSurfaceWithoutMargin;
+ unsigned int DRAMClockChangeMethod = 0;
+ bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+ double MinActiveFCLKChangeMargin = 0.;
+ double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+ double ActiveClockChangeLatencyHidingY;
+ double ActiveClockChangeLatencyHidingC;
+ double ActiveClockChangeLatencyHiding;
+ double EffectiveDETBufferSizeY;
+ double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+ double TotalPixelBW = 0.0;
+ bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeY;
+ double FullDETBufferingTimeC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double WritebackFCLKChangeLatencyMargin;
+ double WritebackLatencyHiding;
+ bool SameTimingForFCLKChange;
+
+ unsigned int TotalActiveWriteback = 0;
+ unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+ unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
+
+ v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+ v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
- Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
- Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
- Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
- dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
- dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
- dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
- dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
- dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+ dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+ dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+ dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+ dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
- __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+ __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
#endif
- st_vars->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (WritebackEnable[k] == true)
- st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+ TotalActiveWriteback = 0;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->WritebackEnable[k] == true)
+ TotalActiveWriteback = TotalActiveWriteback + 1;
}
- if (st_vars->TotalActiveWriteback <= 1) {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+ if (TotalActiveWriteback <= 1) {
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
- + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+ + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (st_vars->TotalActiveWriteback <= 1) {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ if (TotalActiveWriteback <= 1) {
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
- __func__, Watermark->WritebackDRAMClockChangeWatermark);
- dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
- dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
- dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+ __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+ dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+ dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+ dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
#endif
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
- SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+ SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
- st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
- dml_print("DML::%s: k=%d, LineBufferSize = %d\n", __func__, k, LineBufferSize);
- dml_print("DML::%s: k=%d, LBBitPerPixel = %d\n", __func__, k, LBBitPerPixel[k]);
- dml_print("DML::%s: k=%d, HRatio = %f\n", __func__, k, HRatio[k]);
- dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
+ dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+ dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal);
+ dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]);
+ dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]);
+ dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]);
#endif
- st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
- st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
- * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+ * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+ / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
- st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
- st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
- st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+ ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
- / PixelClock[k] / VRatio[k];
+ if (v->NumberOfActiveSurfaces > 1) {
+ ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
- st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
- st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
- / VRatioChroma[k];
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
- / PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
- / PixelClock[k] / VRatioChroma[k];
+ LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+ / v->VRatioChroma[k];
+ ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+ / v->PixelClock[k];
+ if (v->NumberOfActiveSurfaces > 1) {
+ ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatioChroma[k];
}
- st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
- st_vars->ActiveClockChangeLatencyHidingC);
+ ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+ ActiveClockChangeLatencyHidingC);
} else {
- st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+ ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->DRAMClockChangeWatermark;
- st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->FCLKChangeWatermark;
- st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.DRAMClockChangeWatermark;
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.FCLKChangeWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
- if (WritebackEnable[k]) {
- st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
- / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64)
- st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+ if (v->WritebackEnable[k]) {
+ WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64)
+ WritebackLatencyHiding = WritebackLatencyHiding / 2;
- st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
- - Watermark->WritebackDRAMClockChangeWatermark;
+ WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
+ - v->Watermark.WritebackDRAMClockChangeWatermark;
- st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
- - Watermark->WritebackFCLKChangeWatermark;
+ WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
+ - v->Watermark.WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
- st_vars->WritebackFCLKChangeLatencyMargin);
- st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
- st_vars->WritebackDRAMClockChangeLatencyMargin);
+ WritebackFCLKChangeLatencyMargin);
+ ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
- (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+ (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
0 :
(ActiveDRAMClockChangeLatencyMargin[k]
+ mmSOCParameters.DRAMClockChangeLatency);
}
- for (i = 0; i < NumberOfActiveSurfaces; ++i) {
- for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+ for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+ for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
if (i == j ||
- (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
- (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
- (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
- (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
- HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
- VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
- (DRRDisplay[i] || DRRDisplay[j]))) {
- st_vars->SynchronizedSurfaces[i][j] = true;
+ (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+ (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+ (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+ (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+ v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+ v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+ (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
+ SynchronizedSurfaces[i][j] = true;
} else {
- st_vars->SynchronizedSurfaces[i][j] = false;
+ SynchronizedSurfaces[i][j] = false;
}
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
- st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+ ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+ FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+ MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+ SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
- *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+ *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
- st_vars->SameTimingForFCLKChange = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->SameTimingForFCLKChange ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] <
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+ SameTimingForFCLKChange = true;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ (SameTimingForFCLKChange ||
+ ActiveFCLKChangeLatencyMargin[k] <
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
- st_vars->SameTimingForFCLKChange = false;
+ SameTimingForFCLKChange = false;
}
}
- if (st_vars->MinActiveFCLKChangeMargin > 0) {
+ if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
- } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+ } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
@@ -4451,95 +4502,95 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
*USRRetrainingSupport = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ (USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
- st_vars->DRAMClockChangeSupportNumber = 2;
- } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
- st_vars->DRAMClockChangeSupportNumber = 1;
- st_vars->LastSurfaceWithoutMargin = k;
- } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
- !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
- st_vars->DRAMClockChangeSupportNumber = 2;
+ DRAMClockChangeSupportNumber = 2;
+ } else if (DRAMClockChangeSupportNumber == 0) {
+ DRAMClockChangeSupportNumber = 1;
+ LastSurfaceWithoutMargin = k;
+ } else if (DRAMClockChangeSupportNumber == 1 &&
+ !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+ DRAMClockChangeSupportNumber = 2;
}
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
- st_vars->DRAMClockChangeMethod = 1;
- else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
- st_vars->DRAMClockChangeMethod = 2;
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+ DRAMClockChangeMethod = 1;
+ else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+ DRAMClockChangeMethod = 2;
}
- if (st_vars->DRAMClockChangeMethod == 0) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeMethod == 0) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- } else if (st_vars->DRAMClockChangeMethod == 1) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ } else if (DRAMClockChangeMethod == 1) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
unsigned int dst_y_pstate;
unsigned int src_y_pstate_l;
unsigned int src_y_pstate_c;
unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
- dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
- src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
- src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
- sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+ dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+ src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
+ src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
+ sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l);
#endif
SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
if (BytePerPixelDETC[k] > 0) {
- src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
- src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
- sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+ src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
+ src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
+ sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c);
dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c);
#endif
}
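
The refactor above only moves scratch state out of the st_vars struct into locals and the shared vba_vars_st; the watermark arithmetic itself is unchanged: a surface can absorb a DRAM or FCLK clock change in the active region only while its line-buffer/DET latency hiding exceeds the urgent watermark plus the clock-change watermark. A minimal standalone sketch of that relation, with illustrative names and numbers rather than the DML API:

#include <stdio.h>

/*
 * Toy model, not the DML code: all names and values are illustrative.
 * A clock change is hideable in vactive while the buffered latency
 * hiding exceeds the combined watermarks.
 */
static double active_margin_us(double latency_hiding_us,
			       double urgent_wm_us,
			       double clock_change_wm_us)
{
	return latency_hiding_us - urgent_wm_us - clock_change_wm_us;
}

int main(void)
{
	double m = active_margin_us(40.0, 10.0, 20.0);

	printf("margin = %.1f us -> %s\n", m,
	       m > 0 ? "vactive change OK" : "fall back to vblank");
	return 0;
}
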
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 37a314ce284b..0b427d89b3c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -82,7 +82,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double *DPPCLKUsingSingleDPP);
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -217,6 +216,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -362,7 +362,6 @@ void dml32_CalculateSurfaceSizeInMall(
bool *ExceededMALLSize);
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -715,29 +714,14 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -811,59 +795,28 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 84b4b00f29cb..c87091683b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -498,6 +498,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
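
The added fclk_change_latency_us block follows the same override pattern as the neighboring dram and dummy-pstate latencies: compare in integer nanoseconds (sidestepping float equality) and apply only when a nonzero override differs from the current value. A small sketch of that pattern, with hypothetical names:

#include <stdio.h>

/* Hypothetical reduction of the bb_overrides pattern: apply a latency
 * override only when one is set and it differs from the current value.
 * The comparison is done in integer nanoseconds to avoid comparing
 * floating-point values for equality. */
static void apply_latency_override(double *cur_us, int override_ns)
{
	if (override_ns && (int)(*cur_us * 1000) != override_ns)
		*cur_us = override_ns / 1000.0;
}

int main(void)
{
	double fclk_change_latency_us = 7.0;

	apply_latency_override(&fclk_change_latency_us, 12000);
	printf("%.1f us\n", fclk_change_latency_us);	/* 12.0 */
	return 0;
}
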
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 5d27ff0ebb5f..f5400eda07a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,6 +35,8 @@
#include "dcn30/display_rq_dlg_calc_30.h"
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
#include "dcn32/display_mode_vba_32.h"
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
+const struct dml_funcs dml314_funcs = {
+ .validate = dml314_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml314_recalculate,
+ .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
.recalculate = dml32_recalculate,
@@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_DCN31_FPGA:
lib->funcs = dml31_funcs;
break;
+ case DML_PROJECT_DCN314:
+ lib->funcs = dml314_funcs;
+ break;
case DML_PROJECT_DCN32:
lib->funcs = dml32_funcs;
break;
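
dml_init_instance() selects per-generation behavior through a function-pointer table, so wiring up DCN314 amounts to one more enum case plus one more dml_funcs instance. Reduced to its shape (illustrative names, not the real signatures):

#include <stdio.h>

/* Illustrative shape of the dml_funcs dispatch; names are stand-ins. */
enum proj { PROJ_DCN31, PROJ_DCN314, PROJ_DCN32 };

struct ops {
	void (*recalculate)(void);
};

static void recalc_31(void)  { puts("dcn31 recalculate");  }
static void recalc_314(void) { puts("dcn314 recalculate"); }
static void recalc_32(void)  { puts("dcn32 recalculate");  }

static const struct ops funcs_by_project[] = {
	[PROJ_DCN31]  = { .recalculate = recalc_31  },
	[PROJ_DCN314] = { .recalculate = recalc_314 },
	[PROJ_DCN32]  = { .recalculate = recalc_32  },
};

int main(void)
{
	funcs_by_project[PROJ_DCN314].recalculate();
	return 0;
}
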
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 2bdd6ed22611..b1878a1440e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,6 +41,7 @@ enum dml_project {
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
DML_PROJECT_DCN31_FPGA,
+ DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 8460aefe7b6d..2051ddaa641a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
-struct dml32_CalculateSwathAndDETConfiguration {
- unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
- unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpSwathSizeBytesY;
- unsigned int RoundedUpSwathSizeBytesC;
- double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
- double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- bool NoChromaSurfaces;
- unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
- unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
- unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
- bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
- unsigned int SurfaceWithMinActiveFCLKChangeMargin;
- unsigned int DRAMClockChangeSupportNumber;
- unsigned int LastSurfaceWithoutMargin;
- unsigned int DRAMClockChangeMethod;
- bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
- double MinActiveFCLKChangeMargin;
- double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
- double ActiveClockChangeLatencyHidingY;
- double ActiveClockChangeLatencyHidingC;
- double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
- double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
- double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
- double TotalPixelBW;
- bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double LinesInDETY[DC__NUM_DPP__MAX];
- double LinesInDETC[DC__NUM_DPP__MAX];
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double FullDETBufferingTimeY;
- double FullDETBufferingTimeC;
- double WritebackDRAMClockChangeLatencyMargin;
- double WritebackFCLKChangeLatencyMargin;
- double WritebackLatencyHiding;
- bool SameTimingForFCLKChange;
- unsigned int TotalActiveWriteback;
- unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
- unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler;
- double LineTime;
- double dst_y_prefetch_equ;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tvm_oto_lines;
- double Tr0_oto_lines;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE;
- double TimeForFetchingRowInVBlank;
- double LinesToRequestPrefetchPixelData;
- unsigned int HostVMDynamicLevelsTrips;
- double trip_to_mem;
- double Tvm_trips;
- double Tr0_trips;
- double Tvm_trips_rounded;
- double Tr0_trips_rounded;
- double Lsw_oto;
- double Tpre_rounded;
- double prefetch_bw_equ;
- double Tvm_equ;
- double Tr0_equ;
- double Tdmbf;
- double Tdmec;
- double Tdmsks;
- double prefetch_sw_bytes;
- double bytes_pp;
- double dep_bytes;
- unsigned int max_vratio_pre;
- double min_Lsw;
- double Tsw_est1;
- double Tsw_est3;
-};
-
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
- struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
- struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
- struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
};
struct vba_vars_st {
@@ -757,10 +651,10 @@ struct vba_vars_st {
unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
bool ImmediateFlipRequiredFinal;
bool DCCProgrammingAssumesScanDirectionUnknownFinal;
bool EnoughWritebackUnits;
@@ -906,8 +800,6 @@ struct vba_vars_st {
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
double AlignedYPitch[DC__NUM_DPP__MAX];
double AlignedCPitch[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 5d2b028e5dad..d9f1b0a4fbd4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -214,6 +214,7 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 58158764adc0..7614125c92c7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -219,6 +219,10 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context,
uint8_t disabled_master_pipe_idx);
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx);
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
const struct link_hwss *get_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index db7b0b155374..226af06278ce 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->disable(stream_enc);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index ab06c7fc7452..9f3558c0ef11 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -244,13 +244,15 @@ enum {
#define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
#define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f093b49c5e6e..3bf08a60c45c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -119,13 +119,15 @@ enum dc_log_type {
LOG_HDMI_RETIMER_REDRIVER,
LOG_DSC,
LOG_SMU_MSG,
+ LOG_DC2RESERVED4,
+ LOG_DC2RESERVED5,
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
LOG_DP2,
- LOG_SECTION_TOTAL_COUNT
+ LOG_DC2RESERVED12,
};
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 859ffd8725c5..04f7656906ca 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
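
The added clamp keeps hw_x from exceeding 1.0 before it is scaled into a LUT index; values past the end of the curve would otherwise push the index beyond the 0..255 user-regamma table. The same idea in a toy Q16.16 fixed-point format (the kernel uses its own fixed31_32 helpers):

#include <stdint.h>
#include <stdio.h>

/* Toy Q16.16 fixed point; the kernel's fixed31_32 helpers differ. */
typedef int32_t q16;
#define Q16_ONE (1 << 16)

int main(void)
{
	q16 hw_x = (q16)(1.25 * Q16_ONE);	/* abscissa past the curve end */
	int index;

	if (hw_x > Q16_ONE)			/* the clamp added above */
		hw_x = Q16_ONE;

	index = (int)(((int64_t)255 * hw_x) >> 16);
	printf("index = %d\n", index);		/* 255; 318 without the clamp */
	return 0;
}
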
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index da09ba7589f7..0f39ab9dc5b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
- /* FreeSync HDR */
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
- *payload_size = 0x0A;
-
+ *payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
- *payload_size = 0x10;
- infopacket->hb2 = *payload_size - 1; //-1 for checksum
+ infopacket->hb2 = 0x10;
+ *payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
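
Both header fixes make the advertised payload length and the bytes the builder actually accounts for agree; sb[0] carries the checksum, so reporting a 0x0A payload for a 0x09-byte body throws the packet accounting off by one. A toy illustration of why the two must match (deliberately simplified layout, not the DC packet format):

#include <stdint.h>
#include <stdio.h>

/* Simplified: sum the three header bytes and len payload bytes, then
 * store the two's-complement checksum in sb[0]. If the header length
 * over-reports by one, a stale byte gets folded into the checksum. */
static uint8_t infopacket_checksum(const uint8_t hdr[3],
				   const uint8_t *sb, int len)
{
	unsigned int sum = hdr[0] + hdr[1] + hdr[2];
	int i;

	for (i = 1; i <= len; i++)	/* payload occupies sb[1..len] */
		sum += sb[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	uint8_t hdr[3] = { 0x83, 0x01, 0x09 };	/* type / version / length */
	uint8_t sb[32] = { 0 };

	sb[1] = 0x1a;				/* sample payload byte */
	sb[0] = infopacket_checksum(hdr, sb, hdr[2]);
	printf("checksum = 0x%02x\n", sb[0]);
	return 0;
}
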
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
index 2ed95790a600..cf8d60c4df1b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
+#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
index eb62a18fcc48..3d60c9e92548 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 80dab1146439..50bfa513cb35 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -268,7 +268,8 @@ union MESAPI__ADD_QUEUE {
uint32_t is_tmz_queue : 1;
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
- uint32_t reserved : 22;
+ uint32_t trap_en : 1;
+ uint32_t reserved : 21;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
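
Adding trap_en works by carving one bit out of the adjacent reserved field, which keeps the bitfield word, and therefore the firmware ABI struct size, unchanged. A compilable check of that invariant (hypothetical struct names):

#include <stdio.h>

/* Hypothetical struct names; the point is the layout invariant. */
struct flags_old {
	unsigned int is_kfd_process : 1;
	unsigned int reserved : 22;
};

struct flags_new {
	unsigned int is_kfd_process : 1;
	unsigned int trap_en : 1;	/* new bit, taken from reserved */
	unsigned int reserved : 21;
};

int main(void)
{
	/* both fit in one 32-bit word: the ABI-visible size is unchanged */
	printf("old = %zu bytes, new = %zu bytes\n",
	       sizeof(struct flags_old), sizeof(struct flags_new));
	return 0;
}
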
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 78620b0bd279..063f4a737605 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
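
Bumping PMFW_DRIVER_IF_VERSION whenever SmuMetrics_t grows is what lets the driver notice a mismatched firmware at runtime. A sketch of the handshake this constant enables (illustrative, not the SMU code path):

#include <stdio.h>

/* Illustrative handshake: the driver bakes in the interface version
 * it was built against and compares it to what the firmware reports. */
#define DRIVER_IF_VERSION 5	/* matches the bumped header constant */

static int check_fw_if_version(unsigned int fw_version)
{
	if (fw_version != DRIVER_IF_VERSION) {
		fprintf(stderr, "SMU driver_if mismatch: fw %u, driver %u\n",
			fw_version, DRIVER_IF_VERSION);
		return -1;
	}
	return 0;
}

int main(void)
{
	return check_fw_if_version(5) ? 1 : 0;
}
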
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index c02e5e576728..f442bf085a31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,9 +28,9 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -291,5 +291,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa520d79ef67..644ea150e075 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
false;
+
+	/*
+	 * Disable BACO entry/exit completely on the SKUs below to
+	 * avoid intermittent hardware failures.
+	 */
+ if (((adev->pdev->device == 0x73A1) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73BF) &&
+ (adev->pdev->revision == 0xCF)))
+ smu_baco->platform_support = false;
+
}
}
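
The BACO quirk is a plain deny-list keyed on PCI device id plus revision, overriding a capability the hardware strap otherwise advertises. Its generic shape, with the two SKUs from the hunk reused as sample table entries:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sku_id {
	uint16_t device;
	uint8_t revision;
};

/* the two SKUs from the hunk above, as sample data */
static const struct sku_id baco_denylist[] = {
	{ 0x73A1, 0x00 },
	{ 0x73BF, 0xCF },
};

static bool baco_denied(uint16_t device, uint8_t revision)
{
	size_t i;

	for (i = 0; i < sizeof(baco_denylist) / sizeof(baco_denylist[0]); i++)
		if (baco_denylist[i].device == device &&
		    baco_denylist[i].revision == revision)
			return true;
	return false;
}

int main(void)
{
	printf("0x73A1 rev 0x00: BACO %s\n",
	       baco_denied(0x73A1, 0x00) ? "disabled" : "allowed");
	return 0;
}
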
@@ -4283,6 +4294,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e8fe84f806d1..93f9b8377539 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -84,9 +84,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id);
-
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -212,40 +209,16 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use 36831 signed pptable when pp_table_id is 3683
- * - use 36641 signed pptable when pp_table_id is 3664 or 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- case 3664:
- pptable_id = 36641;
- break;
- case 3683:
- pptable_id = 36831;
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -425,8 +398,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
return 0;
}
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
{
const struct smc_firmware_header_v1_0 *hdr;
struct amdgpu_device *adev = smu->adev;
@@ -476,25 +451,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
- * - use 3664 when pptable_id is 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- pptable_id = 3664;
- break;
- case 3664:
- case 3683:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* force using vbios pptable in sriov mode */
@@ -2344,8 +2300,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
-
- return smu_cmn_send_msg_without_waiting(smu, index, 0);
+	/* Pass param 1 to tell the PMFW to enable the GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1bbeceeb9e3c..096327513dd0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -388,30 +388,35 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
{
struct smu_table_context *smu_table = &smu->smu_table;
void *combo_pptable = smu_table->combo_pptable;
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * With SCPM enabled, the pptable used will be signed. It cannot
- * be used directly by driver. To get the raw pptable, we need to
- * rely on the combo pptable(and its revelant SMU message).
- */
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_0_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_0_store_powerplay_table(smu);
if (ret)
@@ -1792,7 +1797,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 82d3718d8324..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
- return ret;
-}
-
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 47360ef5c175..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
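
Both fini_smc_tables hunks free gpu_metrics_table and immediately NULL the pointer, making teardown idempotent: kfree(NULL), like free(NULL), is a defined no-op, so a repeated or racing fini path cannot double-free. The idiom in userspace form:

#include <stdio.h>
#include <stdlib.h>

struct tables {
	void *gpu_metrics_table;
};

static void fini_tables(struct tables *t)
{
	free(t->gpu_metrics_table);	/* free(NULL) is a no-op */
	t->gpu_metrics_table = NULL;	/* make a second fini harmless */
}

int main(void)
{
	struct tables t = { .gpu_metrics_table = malloc(64) };

	fini_tables(&t);
	fini_tables(&t);		/* safe: pointer already NULL */
	puts("ok");
	return 0;
}
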
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9dd56e73218b..c422bf8a09b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -400,11 +401,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_7_powerplay_table);
+
+ return 0;
+}
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -413,18 +430,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
* be used directly by driver. To get the raw pptable, we need to
* rely on the combo pptable(and its revelant SMU message).
*/
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_7_store_powerplay_table(smu);
if (ret)
@@ -1567,6 +1577,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1594,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1646,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
};
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 702ea803a743..39e7004de720 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -180,7 +180,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
of_node_put(bus_node);
if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
- } else if (ret) {
+ } else if (ret < 0) {
dev_err(dev, "invalid 'data-mapping' DT property\n");
return ret;
} else {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 281f8a9ba4fd..7ab38d734ad6 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -550,8 +550,9 @@ static int sii902x_audio_hw_params(struct device *dev, void *data,
unsigned long mclk_rate;
int i, ret;
- if (daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_dbg(dev, "%s: I2S master mode not supported\n", __func__);
+ if (daifmt->bit_clk_provider || daifmt->frame_clk_provider) {
+ dev_dbg(dev, "%s: I2S clock provider mode not supported\n",
+ __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index f50b47ac11a8..a2f0860b20bb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -45,7 +45,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
u8 inputclkfs = 0;
/* it cares I2S only */
- if (fmt->bit_clk_master | fmt->frame_clk_master) {
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
dev_err(dev, "unsupported clock settings\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 493922069c90..01ee3febb813 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -377,8 +377,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
- seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+ seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index bbc25e3b7220..eaa819381281 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5971,12 +5971,14 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static
-void get_monitor_range(const struct detailed_timing *timing,
- void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
{
- struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_info *info = &closure->connector->display_info;
+ struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
+ const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
@@ -5992,18 +5994,28 @@ void get_monitor_range(const struct detailed_timing *timing,
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ monitor_range->min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ monitor_range->max_vfreq += 255;
+ }
}
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
+ struct detailed_mode_closure closure = {
+ .connector = connector,
+ .drm_edid = drm_edid,
+ };
if (!version_greater(drm_edid, 1, 1))
return;
- drm_for_each_detailed_block(drm_edid, get_monitor_range,
- &info->monitor_range);
+ drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
info->monitor_range.min_vfreq,
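
A quick standalone illustration of the EDID 1.4 range-offset decoding added above — this is a hedged sketch, not driver code: the flag values mirror the DRM_EDID_RANGE_OFFSET_* bits referenced in the hunk (assumed bit layout), and the sample field values are invented. With the max offset flag set, an 8-bit max_vfreq of 105 decodes to 360 Hz:

#include <stdio.h>

/* Assumed bit layout, matching the flags referenced above. */
#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0)
#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1)

int main(void)
{
	unsigned char pad2 = DRM_EDID_RANGE_OFFSET_MAX_VFREQ; /* offset flags byte */
	unsigned int min_vfreq = 48, max_vfreq = 105;         /* raw 8-bit fields */

	/* EDID 1.4: a set offset flag adds 255 Hz to the 8-bit field. */
	if (pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
		min_vfreq += 255;
	if (pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
		max_vfreq += 255;

	printf("vfreq range: %u-%u Hz\n", min_vfreq, max_vfreq); /* 48-360 Hz */
	return 0;
}
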
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index eb0c2d041f13..ad068865ba20 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1226,7 +1211,7 @@ retry:
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
@@ -1251,7 +1236,7 @@ retry:
goto retry;
}
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 8ad0e02991ca..904fc893c905 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -302,6 +302,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (WARN_ON(map->is_iomem)) {
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
ret = -EIO;
goto err_put_pages;
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a3f180653b8b..eb09e86044c6 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 7655142a4651..10b0036f8a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1594,12 +1594,12 @@ static int hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_context *hdata = dev_get_drvdata(dev);
if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index dd32b484dd82..ce96234f3df2 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = true;
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
psb_intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index dffe37490206..4b7627a72637 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct psb_gem_object *pobj = to_psb_gem_object(obj);
- drm_gem_object_release(obj);
-
/* Undo the mmap pin if we are destroying the object */
if (pobj->mmapping)
psb_gem_unpin(pobj);
+ drm_gem_object_release(obj);
+
WARN_ON(pobj->in_gart && !pobj->stolen);
release_resource(&pobj->resource);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index bd40c040a2c9..2f52eceda3a1 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 5923a9c89312..f90e628cb482 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
-
+ dev_priv->use_msi = true;
dev_priv->regmap = oaktrail_regmap;
ret = mid_chip_setup(dev);
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index b91de6d36e41..66873085d450 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev)
dev_priv->regs.saveBSM = bsm;
pci_read_config_dword(pdev, 0xFC, &vbt);
dev_priv->regs.saveVBT = vbt;
- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
- /* restoring MSI address and data in PCIx space */
- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
ret = pci_enable_device(pdev);
if (ret != 0)
@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev)
mutex_lock(&power_mutex);
gma_resume_pci(pdev);
gma_resume_display(pdev);
- gma_irq_preinstall(dev);
- gma_irq_postinstall(dev);
+ gma_irq_install(dev);
mutex_unlock(&power_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1d8744f3e702..54e756b48606 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- gma_irq_install(dev, pdev->irq);
+ gma_irq_install(dev);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0ea3d23575f3..731cc356c07a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -490,6 +490,7 @@ struct drm_psb_private {
int rpm_enabled;
/* MID specific */
+ bool use_msi;
bool has_gct;
struct oaktrail_gct_data gct_data;
@@ -499,10 +500,6 @@ struct drm_psb_private {
/* Register state */
struct psb_save_area regs;
- /* MSI reg save */
- uint32_t msi_addr;
- uint32_t msi_data;
-
/* Hotplug handling */
struct work_struct hotplug_work;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6e6d61bbeab..038f18ed0a95 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-int gma_irq_install(struct drm_device *dev, unsigned int irq)
+int gma_irq_install(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (irq == IRQ_NOTCONNECTED)
+ if (dev_priv->use_msi && pci_enable_msi(pdev)) {
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = false;
+ }
+
+ if (pdev->irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
gma_irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
if (ret)
return ret;
@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
free_irq(pdev->irq, dev);
+ if (dev_priv->use_msi)
+ pci_disable_msi(pdev);
}
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index b51e395194ff..7648f69824a5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -17,7 +17,7 @@ struct drm_device;
void gma_irq_preinstall(struct drm_device *dev);
void gma_irq_postinstall(struct drm_device *dev);
-int gma_irq_install(struct drm_device *dev, unsigned int irq);
+int gma_irq_install(struct drm_device *dev);
void gma_irq_uninstall(struct drm_device *dev);
int gma_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 073adfe438dd..4e41c144a290 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 6d11e7938c83..f84d39762a72 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -23,9 +23,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
}
ret = hyperv_setup_vram(hv, hdev);
-
if (ret)
goto err_vmbus_close;
@@ -150,18 +146,20 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
ret = hyperv_mode_config_init(hv);
if (ret)
- goto err_vmbus_close;
+ goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
- goto err_vmbus_close;
+ goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
+err_free_mmio:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 7c4455541dbb..f8eb6f69be05 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1096,11 +1096,11 @@ static int tda998x_audio_hw_params(struct device *dev, void *data,
if (!spdif &&
(daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master)) {
+ daifmt->bit_clk_provider || daifmt->frame_clk_provider)) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 5dcfa7feffa9..1390729401a0 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1629,6 +1629,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@@ -2070,7 +2072,14 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 110fc98ec280..f5e1d692976e 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -16,6 +16,7 @@
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#include "intel_pci_config.h"
+#include "intel_pps.h"
/**
* scale - scale values from one range to another
@@ -971,26 +972,24 @@ int intel_backlight_device_register(struct intel_connector *connector)
if (!name)
return -ENOMEM;
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ bd = backlight_device_get_by_name(name);
+ if (bd) {
+ put_device(&bd->dev);
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
}
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
drm_err(&i915->drm,
@@ -1773,9 +1772,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ connector->panel.backlight.power = intel_pps_backlight_power;
+ }
/* We're using a standard PWM backlight interface */
panel->backlight.funcs = &pwm_bl_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 51dde5bfd956..7d6eb9ad7a02 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -479,6 +479,13 @@ init_bdb_block(struct drm_i915_private *i915,
block_size = get_blocksize(block);
+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequence block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
if (!entry) {
@@ -1596,6 +1603,8 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
struct intel_panel *panel,
enum port port)
{
+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
@@ -1609,11 +1618,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
break;
}
@@ -1625,12 +1634,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
panel->vbt.dsi.cabc_ports =
- BIT(PORT_A) | BIT(PORT_C);
+ BIT(PORT_A) | BIT(port_bc);
break;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 79269d2c476b..3699869ab2db 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -404,15 +404,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int clpchgroup;
int j;
- if (i < num_groups - 1)
- bi_next = &dev_priv->max_bw[i + 1];
-
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
- if (i < num_groups - 1 && clpchgroup < clperchgroup)
- bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
- else
- bi_next->num_planes = 0;
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->max_bw[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) /
+ clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index a0f84cbe974f..fc5d94862ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -27,7 +27,6 @@
#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 32292c0be2bd..3ed7eeacc706 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -389,23 +389,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
- u32 voltage;
-
- voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
- return voltage == VOLTAGE_INFO_0_85V;
-}
-
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) &&
- (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -413,23 +403,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
- return 540000;
-
- return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ if (intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -491,7 +465,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = dg1_max_source_rate(intel_dp);
+ max_rate = 810000;
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -1395,6 +1369,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
* DP_DSC_RC_BUF_SIZE for this.
*/
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
/*
* Slice Height of 8 works for all currently available panels. So start
@@ -5293,8 +5268,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(intel_connector);
- if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
- intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_backlight_setup(intel_connector, pipe);
intel_edp_add_properties(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9feaf1a589f3..d213d8ad1ea5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -671,6 +671,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);
+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this, high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index c8488f5ebd04..e415cd7c0b84 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -191,6 +191,9 @@ static struct intel_quirk intel_quirks[] = {
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 43e1bbc1e303..ca530f0733e0 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b9b1fed99874..35136d26e517 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1933,7 +1933,14 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dabdfe09f5e5..0bcde53c50c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
@@ -1521,10 +1525,6 @@ static void context_close(struct i915_gem_context *ctx)
ctx->file_priv = ERR_PTR(-EBADF);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
-
client = ctx->client;
if (client) {
spin_lock(&client->ctx_lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7b2c14fd9e1..cd75b0ca2555 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -6,7 +6,6 @@
#include <linux/dma-resv.h>
#include <linux/highmem.h>
-#include <linux/intel-iommu.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..85482a04d158 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- assert_object_held(obj);
+ assert_object_held_shared(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}
- if (!i915_gem_object_trylock(obj, NULL)) {
- /* busy, toss it back to the pile */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
- continue;
- }
-
__i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work.work);
+ container_of(work, struct drm_i915_private, mm.free_work);
i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+ queue_work(i915->wq, &i915->mm.free_work);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -731,6 +723,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
bool lmem_placement = false;
int i;
+ if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+ return false;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
@@ -745,7 +740,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..9f6b14ec189a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
-#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
+
+ u32 tlb;
} mm;
struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..8357dbdcab5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@
#include <drm/drm_cache.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-#include "gt/intel_gt.h"
-
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!obj->mm.tlb)
+ return;
+
+ intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+ obj->mm.tlb = 0;
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
- if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
- intel_gt_invalidate_tlbs(to_gt(i915));
- }
+ flush_tlb_invalidate(obj);
return pages;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1030053571a2..8dc5c8874d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -426,7 +426,8 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
+ "drm-i915_gem"));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f131dc065f47..6f3ab7ade41a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -297,7 +297,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ if (i915_gem_object_needs_ccs_pages(obj))
ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
NUM_BYTES_PER_CCS_BYTE),
PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 68c2b0d8f187..f435e06125aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
- mutex_init(&gt->tlb_invalidate_lock);
-
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +946,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;
- GEM_TRACE("\n");
-
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
}
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
spin_unlock_irq(&uncore->lock);
- for_each_engine(engine, gt, id) {
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +988,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
- struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
- mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 82d6f248d876..40b06adf509a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */
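
The two inline helpers above, together with tlb_seqno_passed() in intel_gt.c, encode a small parity protocol: an unbind records the current (even) sequence with bit 0 set, and the check only skips the flush once the counter has advanced far enough that a full invalidation provably started after the unbind. A minimal self-contained model of that arithmetic — plain integers standing in for the seqcount; the names here are illustrative, not the driver's:

#include <assert.h>

#define ALIGN2(x) (((x) + 1u) & ~1u) /* round up to even, like ALIGN(x, 2) */

static unsigned int tlb_seqno; /* stands in for seqprop_sequence(&gt->tlb.seqno) */

static unsigned int next_full_invalidate(void)
{
	return tlb_seqno | 1; /* odd: wait for an invalidate that begins after now */
}

static int seqno_passed(unsigned int seqno)
{
	return (int)(tlb_seqno - ALIGN2(seqno)) > 0; /* signed diff survives wraparound */
}

int main(void)
{
	unsigned int pending = next_full_invalidate(); /* 1, while the counter is 0 */

	assert(!seqno_passed(pending)); /* nothing has been flushed yet */

	tlb_seqno += 2; /* one full invalidate completes; it may have begun
			 * before our PTE clear, so it is not trusted */
	assert(!seqno_passed(pending));

	tlb_seqno += 2; /* a second invalidate, provably started after the unbind */
	assert(seqno_passed(pending)); /* now the pages may be released */
	return 0;
}
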
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index bc898df7a48c..a334787a4939 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,9 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
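
with_intel_gt_pm_if_awake() uses the kernel's usual for-loop idiom for scoped acquire/release macros: the body runs at most once, and the release call sits in the loop's advance expression so it executes on normal exit. A generic sketch of the pattern with invented names — note that, as with the real macros, a bare break inside the body would skip the release:

#include <stdio.h>

static int acquire(int *res) { *res = 1; printf("acquired\n"); return 1; }
static void release(int *res) { *res = 0; printf("released\n"); }

/* Generic form of the with_*() scoped-resource macros used above. */
#define with_resource(res, ok) \
	for (ok = acquire(res); ok; release(res), ok = 0)

int main(void)
{
	int res, ok;

	with_resource(&res, ok) {
		/* body runs once, and only if acquire() succeeded */
		printf("using resource (res=%d)\n", res);
	}
	return 0;
}
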
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index df708802889d..3804a583382b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -83,7 +84,22 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gsc gsc;
- struct mutex tlb_invalidate_lock;
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+ * After unbinding the PTE, we need to ensure the TLBs
+ * are invalidated prior to releasing the physical pages.
+ * But we only need one such invalidation for all unbinds,
+ * so we track how many TLB invalidations have been
+ * performed since unbinding the PTE and only emit an extra
+ * invalidate if no full barrier has passed.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
struct i915_wa_list wa_list;
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 14fe65812e42..1d19c073ba2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -12,6 +12,7 @@
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_rps.h"
struct ia_constants {
unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;
- if (rps->max_freq <= rps->min_freq)
- return false;
-
consts->max_ia_freq = cpu_max_MHz();
consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
- consts->min_gpu_freq = rps->min_freq;
- consts->max_gpu_freq = rps->max_freq;
- if (GRAPHICS_VER(i915) >= 9) {
- /* Convert GT frequency to 50 HZ units */
- consts->min_gpu_freq /= GEN9_FREQ_SCALER;
- consts->max_gpu_freq /= GEN9_FREQ_SCALER;
- }
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
return true;
}
@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
return;
/*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't accidentally get into an infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
+ /*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2c35324b5f68..933648cc90ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -638,9 +638,9 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
{
- int len = 0;
+ u64 len = 0;
while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg);
@@ -650,28 +650,26 @@ static int scatter_list_length(struct scatterlist *sg)
return len;
}
-static void
+static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
- int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
- if (ccs_bytes_to_cpy) {
- if (!src_is_lmem)
- /*
- * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
- * will be taken for the blt. in Flat-ccs supported
- * platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
- * for main memory
- */
- *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
- } else { /* ccs handling is not required */
- *src_sz = CHUNK_SZ;
- }
+ if (ccs_bytes_to_cpy && !src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS supported
+ * platforms an Smem obj will have more pages than required
+ * for main memory, hence limit it to the required size
+ * for main memory
+ */
+ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+ else
+ return CHUNK_SZ;
}
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
- u32 len;
+ u64 len;
do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,13 +700,13 @@ intel_context_migrate_copy(struct intel_context *ce,
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
- u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
enum i915_cache_level ccs_cache_level;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
- int src_sz, dst_sz;
- bool ccs_is_src;
+ u64 src_sz, dst_sz;
+ bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +747,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -788,8 +788,8 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- calculate_chunk_sz(i915, src_is_lmem, &src_sz,
- bytes_to_cpy, ccs_bytes_to_cpy);
+ src_sz = calculate_chunk_sz(i915, src_is_lmem,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
src_offset, src_sz);
@@ -852,6 +852,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
}
/* Arbitration is re-enabled between requests. */
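
The int-to-u64 widening in scatter_list_length() and its callers above guards against one specific failure mode: summing sg_dma_len() values for a multi-GiB object overflows a 32-bit accumulator. A tiny standalone demonstration — plain C, no driver types; the segment sizes are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seg = UINT32_C(1) << 30; /* four hypothetical 1 GiB DMA segments */
	int narrow = 0;    /* the old accumulator type */
	uint64_t wide = 0; /* the new one */

	for (int i = 0; i < 4; i++) {
		narrow += seg; /* wraps: 4 GiB does not fit in 32 bits */
		wide += seg;
	}

	printf("int sum: %d, u64 sum: %llu\n",
	       narrow, (unsigned long long)wide); /* 0 vs 4294967296 */
	return 0;
}
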
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d8b94d638559..6ee8d1127016 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
- if (vma_res->allocated)
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6e90032e12e9..aa6aed837194 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#ifdef CONFIG_64BIT
static void _release_bars(struct pci_dev *pdev)
{
int resno;
@@ -111,6 +112,9 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
static int
region_lmem_release(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fb3f57ee450b..7bb967034679 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -2126,6 +2126,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in a raw format. On newer platforms raw is in
+ * units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2214,6 +2239,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in a raw format. On newer platforms raw is in
+ * units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
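
For reference, the "raw" units the two new helpers return: on GEN9+ the RPS fields count ~16.67 MHz steps, so dividing by GEN9_FREQ_SCALER (3 in the driver) yields the 50 MHz units the ring-frequency table in intel_llc.c works with. A quick hedged arithmetic check with an invented register value:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3 /* raw units of ~16.67 MHz per step */

int main(void)
{
	unsigned int raw = 66; /* hypothetical max_freq field */
	unsigned int units_50mhz = raw / GEN9_FREQ_SCALER;

	printf("raw %u -> %u units of 50 MHz (~%u MHz)\n",
	       raw, units_50mhz, units_50mhz * 50); /* 66 -> 22 (~1100 MHz) */
	return 0;
}
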
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 1e8d56491308..4509dfdc52e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 76916aed897a..3e91f44829e9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
- cancel_delayed_work(&guc->timestamp.work);
+ /*
+ * There is a race with the suspend flow where the worker runs after suspend
+ * and causes an unclaimed register access warning. Cancel the worker
+ * synchronously here.
+ */
+ cancel_delayed_work_sync(&guc->timestamp.work);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -4027,6 +4032,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
xa_destroy(&guc->context_lookup);
/*
+ * A reset might have occurred while we had a pending stalled request,
+ * so make sure we clean that up.
+ */
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+
+ /*
* Some contexts might have been pinned before we enabled GuC
* submission, so we need to add them to the GuC bookkeeping.
* Also, after a reset of the GuC we want to make sure that the
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 557f3314291a..3b81a6d35a7b 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -298,7 +298,7 @@ no_enough_resource:
}
/**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
}
/**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b4f69364f9a1..ce0eb03709c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gvt_vgpu_err("fail to populate guest ggtt entry\n");
 /* The guest driver may read/write the entry during a partial
 * update; in this situation p2m will fail, so handle it by
- * settting the shadow entry to point to a scratch page
+ * setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index aee1a45da74b..705689e64011 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -226,7 +226,6 @@ struct intel_vgpu {
unsigned long nr_cache_entries;
struct mutex cache_lock;
- struct notifier_block iommu_notifier;
atomic_t released;
struct kvm_page_track_notifier_node track_node;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index beea5895e499..61423da36710 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e2f6c56ab342..e3cd58946477 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -231,57 +231,38 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int total_pages;
- int npage;
- int ret;
-
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
-
- for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
-
- ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
- drm_WARN_ON(&i915->drm, ret != 1);
- }
+ vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
+ DIV_ROUND_UP(size, PAGE_SIZE));
}
/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
- unsigned long base_pfn = 0;
- int total_pages;
+ int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct page *base_page = NULL;
int npage;
int ret;
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
/*
 * We pin the pages one-by-one to avoid allocating a big array
 * on the stack to hold pfns.
*/
for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
- unsigned long pfn;
+ dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
+ struct page *cur_page;
- ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
+ IOMMU_READ | IOMMU_WRITE, &cur_page);
if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
- cur_gfn, ret);
- goto err;
- }
-
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- npage++;
- ret = -EFAULT;
+ gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
+ &cur_iova, ret);
goto err;
}
if (npage == 0)
- base_pfn = pfn;
- else if (base_pfn + npage != pfn) {
+ base_page = cur_page;
+ else if (base_page + npage != cur_page) {
gvt_vgpu_err("The pages are not continuous\n");
ret = -EINVAL;
npage++;
@@ -289,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
}
}
- *page = pfn_to_page(base_pfn);
+ *page = base_page;
return 0;
err:
gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
@@ -729,34 +710,25 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
return ret;
}
-static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
+ u64 length)
{
- struct intel_vgpu *vgpu =
- container_of(nb, struct intel_vgpu, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- struct gvt_dma *entry;
- unsigned long iov_pfn, end_iov_pfn;
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct gvt_dma *entry;
+ u64 iov_pfn = iova >> PAGE_SHIFT;
+ u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
- iov_pfn = unmap->iova >> PAGE_SHIFT;
- end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
+ mutex_lock(&vgpu->cache_lock);
+ for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+ entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+ if (!entry)
+ continue;
- mutex_lock(&vgpu->cache_lock);
- for (; iov_pfn < end_iov_pfn; iov_pfn++) {
- entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
- if (!entry)
- continue;
-
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
- entry->size);
- __gvt_cache_remove_entry(vgpu, entry);
- }
- mutex_unlock(&vgpu->cache_lock);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
}
-
- return NOTIFY_OK;
+ mutex_unlock(&vgpu->cache_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
@@ -783,36 +755,20 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- unsigned long events;
- int ret;
-
- vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
- &vgpu->iommu_notifier);
- if (ret != 0) {
- gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
- ret);
- goto out;
- }
- ret = -EEXIST;
if (vgpu->attached)
- goto undo_iommu;
+ return -EEXIST;
- ret = -ESRCH;
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- goto undo_iommu;
+ return -ESRCH;
}
kvm_get_kvm(vgpu->vfio_device.kvm);
- ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
- goto undo_iommu;
+ return -EEXIST;
vgpu->attached = true;
@@ -831,12 +787,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
atomic_set(&vgpu->released, 0);
return 0;
-
-undo_iommu:
- vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
-out:
- return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
@@ -853,8 +803,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int ret;
if (!vgpu->attached)
return;
@@ -864,11 +812,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
- drm_WARN(&i915->drm, ret,
- "vfio_unregister_notifier for iommu failed: %d\n", ret);
-
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -1610,6 +1553,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
+ .dma_unmap = intel_vgpu_dma_unmap,
};
static int intel_vgpu_probe(struct mdev_device *mdev)
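
The structural shift in this file is from a registered VFIO_IOMMU_NOTIFY notifier to a .dma_unmap callback in vfio_device_ops. A minimal sketch of the new shape, with all demo_* names hypothetical:

#include <linux/vfio.h>

/* The driver no longer registers an IOMMU notifier; instead the VFIO core
 * invokes .dma_unmap for each unmapped IOVA range. */
static void demo_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
        /* Drop any cached DMA mappings inside [iova, iova + length). */
}

static const struct vfio_device_ops demo_vfio_ops = {
        .name      = "demo-vfio",
        .dma_unmap = demo_dma_unmap,
        /* .open_device, .close_device, .read, .write, ... elided */
};
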
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c85bafe7539e..1c6e941c9666 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
}
/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @engine: the engine
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d25647be25d1..086bbe8945d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -247,7 +247,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct delayed_work free_work;
+ struct work_struct free_work;
/**
 * Count of objects pending destruction. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1378,7 +1378,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
+ flush_work(&i915->mm.free_work);
flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..b605d0ceaefa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
- i915_gem_drain_freed_objects(dev_priv);
+ /* Flush any outstanding work, including i915_gem_context.release_work. */
+ i915_gem_drain_workqueue(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3168d7007e10..135d04c2d41c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1857,14 +1857,14 @@
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
-#define PROCHOT_MASK REG_BIT(1)
-#define THERMAL_LIMIT_MASK REG_BIT(2)
-#define RATL_MASK REG_BIT(6)
-#define VR_THERMALERT_MASK REG_BIT(7)
-#define VR_TDC_MASK REG_BIT(8)
-#define POWER_LIMIT_4_MASK REG_BIT(9)
-#define POWER_LIMIT_1_MASK REG_BIT(11)
-#define POWER_LIMIT_2_MASK REG_BIT(12)
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
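
The mask change above is a plain off-by-one: every limit-reason flag was shifted one bit too high. A compilable demonstration, using a simplified stand-in for REG_BIT() (the kernel macro adds compile-time checks, so treat this definition as an assumption):

#include <stdio.h>

#define REG_BIT(n) (1u << (n))

int main(void)
{
        /* PROCHOT lives in bit 0, not bit 1, so the old REG_BIT(1)
         * tested the wrong flag. */
        unsigned int prochot_old = REG_BIT(1);
        unsigned int prochot_new = REG_BIT(0);
        unsigned int status = 0x1; /* hardware reporting PROCHOT asserted */

        printf("old mask sees PROCHOT: %s\n", status & prochot_old ? "yes" : "no");
        printf("new mask sees PROCHOT: %s\n", status & prochot_new ? "yes" : "no");
        return 0;
}
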
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef3b04c7e153..373582cfd8f3 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags);
}
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -1310,6 +1308,19 @@ err_unpin:
return err;
}
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+ /*
+ * Before we release the pages that were bound by this vma, we
+ * must invalidate all the TLBs that may still have a reference
+ * back to our physical address. It only needs to be done once,
+ * so after updating the PTE to point away from the pages, record
+ * the most recent TLB invalidation seqno, and if we have not yet
+ * flushed the TLBs upon release, perform a full invalidation.
+ */
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
@@ -1871,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
enum dma_resv_usage usage;
int idx;
- obj->read_domains = 0;
if (flags & EXEC_OBJECT_WRITE) {
usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
} else {
usage = DMA_RESV_USAGE_READ;
+ obj->write_domain = 0;
}
dma_fence_array_for_each(curr, idx, fence)
@@ -1941,7 +1953,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
- unbind_fence = i915_vma_resource_unbind(vma_res);
+ if (async)
+ unbind_fence = i915_vma_resource_unbind(vma_res,
+ &vma->obj->mm.tlb);
+ else
+ unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1966,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
i915_vma_detach(vma);
- if (!async && unbind_fence) {
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
- unbind_fence = NULL;
+ if (!async) {
+ if (unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 88ca0bd9c900..33a58f605d75 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
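
A toy model of the seqno scheme described in the vma_invalidate_tlb() comment: record the next full-invalidation seqno when pages are unbound, and skip the flush on release if a full invalidation has happened in the meantime. This is purely illustrative; the names and the even/odd encoding are assumptions about the i915 implementation, not a restatement of it:

#include <stdio.h>

static unsigned int gt_invalidate_seqno = 2; /* even: no invalidation pending */

static unsigned int next_invalidate_seqno(void)
{
        return gt_invalidate_seqno | 1; /* odd: "needs flush after this point" */
}

static int tlb_flush_needed(unsigned int recorded)
{
        /* A later full invalidation supersedes the recorded one. */
        return recorded == (gt_invalidate_seqno | 1);
}

int main(void)
{
        unsigned int tlb = next_invalidate_seqno();
        printf("flush needed now: %d\n", tlb_flush_needed(tlb));
        gt_invalidate_seqno += 2; /* a full TLB invalidation ran */
        printf("flush needed after full invalidation: %d\n", tlb_flush_needed(tlb));
        return 0;
}
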
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 27c55027387a..5a67995ea5fe 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -223,10 +223,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
* Return: A refcounted pointer to a dma-fence that signals when unbinding is
* complete.
*/
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb)
{
struct i915_address_space *vm = vma_res->vm;
+ vma_res->tlb = tlb;
+
/* Reference for the sw fence */
i915_vma_resource_get(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 5d8427caa2ba..06923d1816e7 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -67,6 +67,7 @@ struct i915_page_sizes {
* taken when the unbind is scheduled.
* @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
* needs to be skipped for unbind.
+ * @tlb: pointer to obj->mm.tlb for an async unbind; otherwise NULL
*
 * The lifetime of a struct i915_vma_resource extends from a binding request
 * until the possibly asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
bool immediate_unbind:1;
bool needs_wakeref:1;
bool skip_pte_rewrite:1;
+
+ u32 *tlb;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
void i915_vma_resource_free(struct i915_vma_resource *vma_res);
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb);
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 157e166672d7..5595639d0033 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -1076,7 +1076,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f06babdb3a8c..9fe4b583cc28 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6561,7 +6561,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
enum plane_id plane_id;
u8 slices;
- skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ memset(&crtc_state->wm.skl.optimal, 0,
+ sizeof(crtc_state->wm.skl.optimal));
+ if (crtc_state->hw.active)
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
@@ -6572,6 +6575,9 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ if (!crtc_state->hw.active)
+ continue;
+
skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
plane_id, ddb, ddb_y);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 9b84df34a6a1..8cf3352d8858 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
drm_kms_helper_poll_init(drm);
- drm_bridge_connector_enable_hpd(kms->connector);
-
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 8989e215dfc9..011be7ff51e1 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -111,6 +111,12 @@ int lima_devfreq_init(struct lima_device *ldev)
struct dev_pm_opp *opp;
unsigned long cur_freq;
int ret;
+ const char *regulator_names[] = { "mali", NULL };
+ const char *clk_names[] = { "core", NULL };
+ struct dev_pm_opp_config config = {
+ .regulator_names = regulator_names,
+ .clk_names = clk_names,
+ };
if (!device_property_present(dev, "operating-points-v2"))
/* Optional, continue without devfreq */
@@ -118,11 +124,7 @@ int lima_devfreq_init(struct lima_device *ldev)
spin_lock_init(&ldevfreq->lock);
- ret = devm_pm_opp_set_clkname(dev, "core");
- if (ret)
- return ret;
-
- ret = devm_pm_opp_set_regulators(dev, (const char *[]){ "mali" }, 1);
+ ret = devm_pm_opp_set_config(dev, &config);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 2d72cc5ddaba..6b6d5335c834 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9cc406e1eee1..3b7d13028fb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after dsi is fully set.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
dsi->enabled = false;
}
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 1b70938cfd2c..bd4ca11d3ff5 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
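
The refcount rule the meson fix enforces, as a hedged sketch: for_each_endpoint_of_node() holds a reference on the current endpoint for the duration of the iteration, and of_graph_get_remote_port() returns a referenced node, so any early exit from the loop must drop both references by hand:

#include <linux/of.h>
#include <linux/of_graph.h>

static bool has_remote_port(struct device_node *parent)
{
        struct device_node *ep, *remote;

        for_each_endpoint_of_node(parent, ep) {
                remote = of_graph_get_remote_port(ep);
                if (remote) {
                        of_node_put(remote); /* ref from of_graph_get_remote_port() */
                        of_node_put(ep);     /* ref held by the iterator */
                        return true;
                }
        }
        return false;
}
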
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index b9ac932af8d0..03acc68abf2c 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -170,7 +170,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 259f3e6bec90..d4b907889a21 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
@@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
- VIU_OSD_BLEND_REORDER(1, 0) |
- VIU_OSD_BLEND_REORDER(2, 0) |
- VIU_OSD_BLEND_REORDER(3, 0) |
- VIU_OSD_BLEND_DIN_EN(1) |
- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
- VIU_OSD_BLEND_HOLD_LINES(4),
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
+ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
+ (u32)VIU_OSD_BLEND_DIN_EN(1) |
+ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
+ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 251a1bb648cc..a222bf76804f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -262,7 +262,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ /*
+ * FIXME: A 24-bit color depth does not work with 24 bpp on
+ * G200ER. Force 32 bpp.
+ */
+ drm_fbdev_generic_setup(dev, 32);
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c682d4e02d1b..52a626117f70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2061,6 +2061,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ab6aa13b1639..013ca02e17cb 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2c23324a2296..72c018e26f47 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -109,7 +109,7 @@ static const char * const dsi_8996_bus_clk_names[] = {
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 3,
.regs = {
{"vdda", 18160, 1 }, /* 1.25 V */
{"vcca", 17000, 32 }, /* 0.925 V */
@@ -148,7 +148,7 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 1,
.regs = {
{"vdda", 12560, 4 }, /* 1.2 V */
},
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index a39de3bdc7fa..56dfa2d24be1 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1ed4cd09dbf8..16884db272de 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -469,6 +469,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
}
}
+ drm_helper_move_panel_connectors_to_head(ddev);
+
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 6e39d959b9f0..0317055e3253 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -221,7 +221,7 @@ void msm_gem_shrinker_init(struct drm_device *dev)
priv->shrinker.count_objects = msm_gem_shrinker_count;
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&priv->shrinker));
+ WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d1f70426f554..85c443a37e4e 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_qos_remove_request(&df->idle_freq);
+ dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index a92ffde53f0b..db2f847c8535 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 05076e530e7d..e29175e4b44c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -820,6 +820,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+ * Without this the operation can time out and we'll fall back to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 568182e68dd7..d8cf71fb0512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2605,6 +2605,27 @@ nv172_chipset = {
};
static const struct nvkm_device_chip
+nv173_chipset = {
+ .name = "GA103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = { 0x00000001, tu102_bar_new },
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index cdb154c8b866..b75c690bb0cc 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1295,7 +1295,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
},
.delay = {
.hpd_absent = 200,
- .prepare_to_enable = 80,
+ .enable = 80,
+ .disable = 50,
.unprepare = 500,
},
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ff5e1a44c43a..1e716c23019a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2257,7 +2257,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 194af7f607a6..fe5f12f16a63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -101,8 +101,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return 0;
}
- ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
- pfdev->comp->num_supplies);
+ ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV) {
@@ -132,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
+
+ /*
+ * Set the recommended OPP: this enables and configures the regulator,
+ * if any, and avoids a switch-off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ return ret;
+ }
+
dev_pm_opp_put(opp);
/*
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2d870cf73b07..2fa5afe21288 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -626,24 +626,29 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
-static const char * const default_supplies[] = { "mali" };
+/*
+ * The OPP core wants the supply names to be NULL terminated, but we need the
+ * correct num_supplies value for the regulator core. Hence, we NULL-terminate here
+ * and then initialize num_supplies with ARRAY_SIZE - 1.
+ */
+static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.num_pm_domains = 1, /* optional */
.pm_domain_names = NULL,
};
static const struct panfrost_compatible amlogic_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
+ .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
.supply_names = mediatek_mt8183_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
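
The pattern above — one array serving a NULL-terminated consumer and a counted consumer — is easy to demonstrate standalone. A minimal sketch (the names are illustrative; ARRAY_SIZE is restated here rather than taken from the kernel headers):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* The OPP core walks the array until it hits NULL, while the regulator
 * core wants an explicit count, so the sentinel must be excluded. */
static const char * const supplies[] = { "mali", "sram", NULL };

int main(void)
{
        unsigned int num_supplies = ARRAY_SIZE(supplies) - 1; /* drop the NULL */
        unsigned int i;

        for (i = 0; supplies[i]; i++) /* NULL-terminated walk (OPP style) */
                printf("supply[%u] = %s\n", i, supplies[i]);
        printf("num_supplies = %u\n", num_supplies);
        return 0;
}
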
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 77e7cb6d1ae3..bf0170782f25 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -103,7 +103,7 @@ void panfrost_gem_shrinker_init(struct drm_device *dev)
pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
pfdev->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&pfdev->shrinker));
+ WARN_ON(register_shrinker(&pfdev->shrinker, "drm-panfrost"));
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2b12389f841a..ee0165687239 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1605,6 +1605,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c204e9b95c1f..518ee13b1d6f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index e4631f515ba4..f9aa8b96c695 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1439,11 +1439,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_EDP0:
die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_EDP |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_MIPI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 61a034a01764..cb82622877d2 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1176,12 +1176,12 @@ static int hdmi_audio_hw_params(struct device *dev,
DRM_DEBUG_DRIVER("\n");
if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index b4dfa166eccd..34234a144e87 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
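
Why dropping the (unsigned int) casts and making Bpp an int matters: with tight timings the computed porch can go negative, and if the arithmetic is unsigned it wraps to a huge value that max() then happily picks. A compilable demonstration under a simplified MAX() (an assumption; the kernel max() also enforces matching types):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define HFP_PACKET_OVERHEAD 16

int main(void)
{
        int hsync_start = 1920, hdisplay = 1918;
        unsigned int bpp_unsigned = 3;
        int bpp_signed = 3;

        /* (2 * 3) - 16 is -10; unsigned arithmetic wraps it instead. */
        unsigned int bad = MAX((unsigned int)HFP_PACKET_OVERHEAD,
                               (hsync_start - hdisplay) * bpp_unsigned - HFP_PACKET_OVERHEAD);
        int good = MAX(HFP_PACKET_OVERHEAD,
                       (hsync_start - hdisplay) * bpp_signed - HFP_PACKET_OVERHEAD);

        printf("unsigned Bpp: hfp = %u\n", bad);  /* wraps to ~4 billion */
        printf("signed Bpp:   hfp = %d\n", good); /* clamps to 16 */
        return 0;
}
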
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e210df65c30..97184c333526 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -912,7 +912,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1cbfb00c1d65..911141d16e95 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -236,16 +236,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
- if (fbo->base.resource) {
- ttm_resource_set_bo(fbo->base.resource, &fbo->base);
- bo->resource = NULL;
- }
-
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1bba0a0ed3f9..21b61631f73a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -722,7 +722,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker.count_objects = ttm_pool_shrinker_count;
mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
mm_shrinker.seeks = 1;
- return register_shrinker(&mm_shrinker);
+ return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}
/**
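
The register_shrinker() signature change seen here and in the msm/panfrost hunks adds a printf-style name so each shrinker is identifiable in debugfs and tracing rather than appearing as an anonymous entry. A hedged sketch of the updated call, with all demo_* names hypothetical:

#include <linux/init.h>
#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
        return 0; /* nothing reclaimable in this sketch */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
        return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
        .count_objects = demo_count,
        .scan_objects  = demo_scan,
        .seeks         = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
        return register_shrinker(&demo_shrinker, "drm-demo");
}
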
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 061be9a6619d..b0f3117102ca 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -8,6 +8,7 @@ config DRM_VC4
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
+ depends on PM
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6ab83296b0e4..1e5f68704d7d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2003,6 +2003,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
+ .legacy_dai_naming = 1,
};
static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai)
@@ -2854,7 +2855,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
@@ -2971,17 +2972,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
+ pm_runtime_enable(dev);
+
/*
- * We need to have the device powered up at this point to call
- * our reset hook and for the CEC init.
+ * We need to have the device powered up at this point to call
+ * our reset hook and for the CEC init.
*/
- ret = vc4_hdmi_runtime_resume(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_put_ddc;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ goto err_disable_runtime_pm;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -3027,6 +3026,7 @@ err_destroy_conn:
err_destroy_encoder:
drm_encoder_cleanup(encoder);
pm_runtime_put_sync(dev);
+err_disable_runtime_pm:
pm_runtime_disable(dev);
err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 4b90c86ee5f8..47774b9ab3de 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -288,11 +288,29 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
return 0;
}
+static const struct dmi_system_id dmi_nodevs[] = {
+ {
+ /*
+ * Google Chromebooks use Chrome OS Embedded Controller Sensor
+ * Hub instead of Sensor Hub Fusion and leaves MP2
+ * uninitialized, which disables all functionality, including
+ * the registers necessary for feature detection.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ },
+ },
+ { }
+};
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
int rc;
+ if (dmi_first_match(dmi_nodevs))
+ return -ENODEV;
+
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
if (!privdata)
return -ENOMEM;
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 08c9a9a60ae4..b59c3dafa6a4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1212,6 +1212,13 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc = new_rdesc;
}
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD &&
+ *rsize == 331 && rdesc[190] == 0x85 && rdesc[191] == 0x5a &&
+ rdesc[204] == 0x95 && rdesc[205] == 0x05) {
+ hid_info(hdev, "Fixing up Asus N-KEY keyb report descriptor\n");
+ rdesc[205] = 0x01;
+ }
+
return rdesc;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0fb720a96399..f80d6193fca6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -185,6 +185,8 @@
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
@@ -414,6 +416,7 @@
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
+#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 48c1c02c69f4..859aeb07542e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -383,6 +383,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1532,7 +1534,10 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
* assume ours
*/
if (!report->tool)
- hid_report_set_tool(report, input, usage->code);
+ report->tool = usage->code;
+
+ /* drivers may have changed the value behind our back, resend it */
+ hid_report_set_tool(report, input, report->tool);
} else {
hid_report_release_tool(report, input, usage->code);
}
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 92ac4f605f13..6028af3c3aae 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -1221,6 +1221,7 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
spin_lock_irqsave(&ctlr->lock, flags);
if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report &&
+ ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED &&
(msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS &&
(ctlr->rumble_queue_head != ctlr->rumble_queue_tail ||
ctlr->rumble_zero_countdown > 0)) {
@@ -1545,12 +1546,13 @@ static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l,
ctlr->rumble_queue_head = 0;
memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data,
JC_RUMBLE_DATA_SIZE);
- spin_unlock_irqrestore(&ctlr->lock, flags);
/* don't wait for the periodic send (reduces latency) */
- if (schedule_now)
+ if (schedule_now && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED)
queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
return 0;
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index dc67717d2dab..70f602c64fd1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -314,6 +314,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index a3b151b29bd7..fc616db4231b 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index c3e6d69fdfbd..cf1679b0d4fb 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -67,12 +67,13 @@ static const struct tm_wheel_info tm_wheels_infos[] = {
{0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
{0x0206, 0x0005, "Thrustmaster T300RS"},
{0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
+ {0x020a, 0x0005, "Thrustmaster T300RS (Sparco R383 Mod)"},
{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
{0x0002, 0x0002, "Thrustmaster T500RS"}
//{0x0407, 0x0001, "Thrustmaster TMX"}
};
-static const uint8_t tm_wheels_infos_length = 4;
+static const uint8_t tm_wheels_infos_length = 7;
/*
 * This struct contains (in little endian) the response data
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 681614a8302a..197b1e7bf029 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -350,6 +350,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ for (int i = list->tail; i < list->head; i++)
+ kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
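
The hidraw hunk frees report values still queued between tail and head at release time, closing a leak when a reader exits with unread data. A toy userspace model of that drain (note this sketch additionally handles head/tail wraparound, which the straight `i < head` loop above does not):

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct entry { char *value; };

/* Free any entries still sitting between tail and head: they were never
 * read, so their heap buffers must go before the list itself does. */
static void drain_ring(struct entry *ring, int tail, int head)
{
        for (int i = tail; i != head; i = (i + 1) % RING_SIZE) {
                free(ring[i].value);
                ring[i].value = NULL;
        }
}

int main(void)
{
        struct entry ring[RING_SIZE] = { 0 };
        int head = 3, tail = 1;

        for (int i = tail; i != head; i = (i + 1) % RING_SIZE)
                ring[i].value = malloc(64); /* pretend these are queued reports */

        drain_ring(ring, tail, head); /* no leak on release */
        return 0;
}
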
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index e600dbf04dfc..fc108f19a64c 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -32,6 +32,7 @@
#define ADL_P_DEVICE_ID 0x51FC
#define ADL_N_DEVICE_ID 0x54FC
#define RPL_S_DEVICE_ID 0x7A78
+#define MTL_P_DEVICE_ID 0x7E45
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 2c67ec17bec6..7120b30ac51d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -43,6 +43,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 6a5cc11aefd8..35dddc5015b3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -105,7 +105,7 @@ struct report_list {
* @multi_packet_cnt: Count of fragmented packet count
*
* This structure is used to store completion flags and per client data like
- * like report description, number of HID devices etc.
+ * report description, number of HID devices etc.
*/
struct ishtp_cl_data {
/* completion flags */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 405e0d5212cc..df0a825694f5 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
}
/**
- * ipc_tx_callback() - IPC tx callback function
+ * ipc_tx_send() - IPC tx send function
* @prm: Pointer to client device instance
*
- * Send message over IPC either first time or on callback on previous message
- * completion
+ * Send a message over IPC. The message is split into fragments
+ * if it is bigger than the IPC FIFO size, and all fragments
+ * are sent one by one.
*/
-static void ipc_tx_callback(void *prm)
+static void ipc_tx_send(void *prm)
{
struct ishtp_cl *cl = prm;
struct ishtp_cl_tx_ring *cl_msg;
@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
list);
rem = cl_msg->send_buf.size - cl->tx_offs;
- ishtp_hdr.host_addr = cl->host_client_id;
- ishtp_hdr.fw_addr = cl->fw_client_id;
- ishtp_hdr.reserved = 0;
- pmsg = cl_msg->send_buf.data + cl->tx_offs;
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
- if (rem <= dev->mtu) {
- ishtp_hdr.length = rem;
- ishtp_hdr.msg_complete = 1;
- cl->sending = 0;
- list_del_init(&cl_msg->list); /* Must be before write */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- /* Submit to IPC queue with no callback */
- ishtp_write_message(dev, &ishtp_hdr, pmsg);
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
- ++cl->tx_ring_free_size;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
- tx_free_flags);
- } else {
- /* Send IPC fragment */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- cl->tx_offs += dev->mtu;
- ishtp_hdr.length = dev->mtu;
- ishtp_hdr.msg_complete = 0;
- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ break;
+ } else {
+ /* Send IPC fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ /* All fragments submitted to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
}
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
}
/**
@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
return;
cl->tx_offs = 0;
- ipc_tx_callback(cl);
+ ipc_tx_send(cl);
++cl->send_msg_cnt_ipc;
}
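
The reworked ipc_tx_send() replaces per-fragment completion callbacks with a single loop that chops the message into MTU-sized pieces, marking only the last piece complete. An illustrative, compilable model of that loop (names and the printf stand-in for the IPC write are assumptions):

#include <stdio.h>
#include <stddef.h>

static void send_fragments(size_t msg_size, size_t mtu)
{
        size_t offs = 0;

        while (msg_size - offs > 0) {
                size_t rem = msg_size - offs;
                size_t len = rem <= mtu ? rem : mtu;
                int complete = (rem <= mtu); /* last fragment or only packet */

                printf("fragment: offs=%zu len=%zu msg_complete=%d\n",
                       offs, len, complete);
                offs += len;
                if (complete)
                        break;
        }
}

int main(void)
{
        send_fragments(300, 128); /* 128 + 128 + 44, last marked complete */
        return 0;
}
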
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6218bbf6863a..eca7afd366d6 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -171,6 +171,14 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.rescind_work_queue =
+ create_workqueue("hv_vmbus_rescind");
+ if (!vmbus_connection.rescind_work_queue) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vmbus_connection.ignore_any_offer_msg = false;
+
vmbus_connection.handle_primary_chan_wq =
create_workqueue("hv_pri_chan");
if (!vmbus_connection.handle_primary_chan_wq) {
@@ -357,6 +365,9 @@ void vmbus_disconnect(void)
if (vmbus_connection.handle_primary_chan_wq)
destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+ if (vmbus_connection.rescind_work_queue)
+ destroy_workqueue(vmbus_connection.rescind_work_queue);
+
if (vmbus_connection.work_queue)
destroy_workqueue(vmbus_connection.work_queue);
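The new rescind queue follows the usual create/queue/drain/destroy lifecycle, with drain_workqueue() guaranteeing that queued work has finished before the queue is freed. A hedged in-kernel sketch of that pattern (the handler and setup/teardown names are illustrative, not the vmbus code):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *rescind_wq;
static struct work_struct rescind_work;

static void rescind_fn(struct work_struct *w)
{
	/* process one rescind message */
}

static int rescind_setup(void)
{
	rescind_wq = create_workqueue("hv_vmbus_rescind");
	if (!rescind_wq)
		return -ENOMEM;

	INIT_WORK(&rescind_work, rescind_fn);
	queue_work(rescind_wq, &rescind_work); /* runs rescind_fn() */
	return 0;
}

static void rescind_teardown(void)
{
	drain_workqueue(rescind_wq);   /* wait for pending work */
	destroy_workqueue(rescind_wq); /* then free the queue */
}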
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 91e8a72eee14..fdf6decacf06 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -248,7 +249,7 @@ struct dm_capabilities_resp_msg {
* num_committed: Committed memory in pages.
* page_file_size: The accumulated size of all page files
* in the system in pages.
- * zero_free: The nunber of zero and free pages.
+ * zero_free: The number of zero and free pages.
* page_file_writes: The writes to the page file in pages.
* io_diff: An indicator of file cache efficiency or page file activity,
* calculated as File Cache Page Fault Count - Page Read Count.
@@ -567,6 +568,11 @@ struct hv_dynmem_device {
__u32 version;
struct page_reporting_dev_info pr_dev_info;
+
+ /*
+ * Maximum number of pages that can be hot-added
+ */
+ __u64 max_dynamic_page_count;
};
static struct hv_dynmem_device dm_device;
@@ -1078,6 +1084,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
pr_info("Max. dynamic memory size: %llu MB\n",
(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
+ dm->max_dynamic_page_count = *max_page_count;
}
break;
@@ -1117,6 +1124,19 @@ static unsigned long compute_balloon_floor(void)
}
/*
+ * Compute total committed memory pages
+ */
+
+static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
+{
+ return vm_memory_committed() +
+ dm->num_pages_ballooned +
+ (dm->num_pages_added > dm->num_pages_onlined ?
+ dm->num_pages_added - dm->num_pages_onlined : 0) +
+ compute_balloon_floor();
+}
+
+/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
* periodically at 1 second intervals.
@@ -1157,11 +1177,7 @@ static void post_status(struct hv_dynmem_device *dm)
* asking us to balloon them out.
*/
num_pages_avail = si_mem_available();
- num_pages_committed = vm_memory_committed() +
- dm->num_pages_ballooned +
- (dm->num_pages_added > dm->num_pages_onlined ?
- dm->num_pages_added - dm->num_pages_onlined : 0) +
- compute_balloon_floor();
+ num_pages_committed = get_pages_committed(dm);
trace_balloon_status(num_pages_avail, num_pages_committed,
vm_memory_committed(), dm->num_pages_ballooned,
@@ -1807,6 +1823,109 @@ out:
return ret;
}
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * hv_balloon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the statistics that can be read from the hv-balloon debugfs file.
+ *
+ * Return: zero on success or an error code.
+ */
+static int hv_balloon_debug_show(struct seq_file *f, void *offset)
+{
+ struct hv_dynmem_device *dm = f->private;
+ char *sname;
+
+ seq_printf(f, "%-22s: %u.%u\n", "host_version",
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
+
+ seq_printf(f, "%-22s:", "capabilities");
+ if (ballooning_enabled())
+ seq_puts(f, " enabled");
+
+ if (hot_add_enabled())
+ seq_puts(f, " hot_add");
+
+ seq_puts(f, "\n");
+
+ seq_printf(f, "%-22s: %u", "state", dm->state);
+ switch (dm->state) {
+ case DM_INITIALIZING:
+ sname = "Initializing";
+ break;
+ case DM_INITIALIZED:
+ sname = "Initialized";
+ break;
+ case DM_BALLOON_UP:
+ sname = "Balloon Up";
+ break;
+ case DM_BALLOON_DOWN:
+ sname = "Balloon Down";
+ break;
+ case DM_HOT_ADD:
+ sname = "Hot Add";
+ break;
+ case DM_INIT_ERROR:
+ sname = "Error";
+ break;
+ default:
+ sname = "Unknown";
+ }
+ seq_printf(f, " (%s)\n", sname);
+
+ /* HV Page Size */
+ seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
+
+ /* Pages added with hot_add */
+ seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
+
+ /* pages that are "onlined"/used from pages_added */
+ seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
+
+ /* pages we have given back to host */
+ seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
+
+ seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
+ get_pages_committed(dm));
+
+ seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
+ dm->max_dynamic_page_count);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
+
+static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+ debugfs_create_file("hv-balloon", 0444, NULL, b,
+ &hv_balloon_debug_fops);
+}
+
+static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+ debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+}
+
+#else
+
+static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+}
+
+static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
static int balloon_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1854,6 +1973,8 @@ static int balloon_probe(struct hv_device *dev,
goto probe_error;
}
+ hv_balloon_debugfs_init(&dm_device);
+
return 0;
probe_error:
@@ -1879,6 +2000,8 @@ static int balloon_remove(struct hv_device *dev)
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+ hv_balloon_debugfs_exit(dm);
+
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
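DEFINE_SHOW_ATTRIBUTE(name) is what lets the patch stop at writing hv_balloon_debug_show(): the macro generates name_open() and name_fops around a name_show(struct seq_file *, void *) function. A minimal sketch of the same pattern in a hypothetical module (file name and counter are illustrative):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static unsigned long example_counter;
static struct dentry *example_file;

static int example_show(struct seq_file *f, void *offset)
{
	seq_printf(f, "%-22s: %lu\n", "counter", example_counter);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example); /* generates example_fops */

static int __init example_init(void)
{
	example_file = debugfs_create_file("example", 0444, NULL, NULL,
					   &example_fops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_file);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");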
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 660036da7449..922d83eb7ddf 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy)
/*
* The strings sent from the host are encoded in
- * in utf16; convert it to utf8 strings.
+ * utf16; convert it to utf8 strings.
* The host assures us that the utf16 strings will not exceed
* the max lengths specified. We will however, reserve room
* for the string terminating character - in the utf16s_utf8s()
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4f5b824b16cf..dc673edf053c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -261,6 +261,13 @@ struct vmbus_connection {
struct workqueue_struct *work_queue;
struct workqueue_struct *handle_primary_chan_wq;
struct workqueue_struct *handle_sub_chan_wq;
+ struct workqueue_struct *rescind_work_queue;
+
+ /*
+ * On suspension of the vmbus, the accumulated offer messages
+ * must be dropped.
+ */
+ bool ignore_any_offer_msg;
/*
* The number of sub-channels and hv_sock channels that should be
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 547ae334e5cd..3c833ea60db6 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
+#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -1160,7 +1161,9 @@ void vmbus_on_msg_dpc(unsigned long data)
* work queue: the RESCIND handler can not start to
* run before the OFFER handler finishes.
*/
- schedule_work(&ctx->work);
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
+ queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
@@ -1186,6 +1189,8 @@ void vmbus_on_msg_dpc(unsigned long data)
* to the CPUs which will execute the offer & rescind
* works by the time these works will start execution.
*/
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
atomic_inc(&vmbus_connection.offer_in_progress);
fallthrough;
@@ -2258,26 +2263,43 @@ static int vmbus_acpi_remove(struct acpi_device *device)
static void vmbus_reserve_fb(void)
{
- int size;
+ resource_size_t start = 0, size;
+ struct pci_dev *pdev;
+
+ if (efi_enabled(EFI_BOOT)) {
+ /* Gen2 VM: get FB base from EFI framebuffer */
+ start = screen_info.lfb_base;
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ } else {
+ /* Gen1 VM: get FB base from PCI */
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev)
+ return;
+
+ if (pdev->resource[0].flags & IORESOURCE_MEM) {
+ start = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ }
+
+ /*
+ * Release the PCI device so hyperv_drm or hyperv_fb driver can
+ * grab it later.
+ */
+ pci_dev_put(pdev);
+ }
+
+ if (!start)
+ return;
+
/*
* Make a claim for the frame buffer in the resource tree under the
* first node, which will be the one below 4GB. The length seems to
* be underreported, particularly in a Generation 1 VM. So start out
* reserving a larger area and make it smaller until it succeeds.
*/
-
- if (screen_info.lfb_base) {
- if (efi_enabled(EFI_BOOT))
- size = max_t(__u32, screen_info.lfb_size, 0x800000);
- else
- size = max_t(__u32, screen_info.lfb_size, 0x4000000);
-
- for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
- fb_mmio = __request_region(hyperv_mmio,
- screen_info.lfb_base, size,
- fb_mmio_name, 0);
- }
- }
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+ fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
/**
@@ -2309,7 +2331,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
- resource_size_t range_min, range_max, start;
+ resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2344,6 +2366,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
@@ -2446,15 +2476,20 @@ acpi_walk_err:
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
+ struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
+ hv_context.cpu_context, VMBUS_CONNECT_CPU);
struct vmbus_channel *channel, *sc;
- while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
- /*
- * We wait here until the completion of any channel
- * offers that are currently in progress.
- */
- usleep_range(1000, 2000);
- }
+ tasklet_disable(&hv_cpu->msg_dpc);
+ vmbus_connection.ignore_any_offer_msg = true;
+ /* The tasklet_enable() takes care of providing a memory barrier */
+ tasklet_enable(&hv_cpu->msg_dpc);
+
+ /* Drain all the workqueues as we are in suspend */
+ drain_workqueue(vmbus_connection.rescind_work_queue);
+ drain_workqueue(vmbus_connection.work_queue);
+ drain_workqueue(vmbus_connection.handle_primary_chan_wq);
+ drain_workqueue(vmbus_connection.handle_sub_chan_wq);
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2531,6 +2566,8 @@ static int vmbus_bus_resume(struct device *dev)
size_t msgsize;
int ret;
+ vmbus_connection.ignore_any_offer_msg = false;
+
/*
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
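The fb_mmio skip added to vmbus_allocate_mmio() above is a closed-interval test: a candidate [start, end] is rejected when either endpoint falls inside the reserved frame-buffer range. A self-contained illustration of the predicate (userspace, types simplified):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; }; /* closed interval */

static bool endpoint_in(const struct range *r, unsigned long p)
{
	return p >= r->start && p <= r->end;
}

/* Mirrors the check in vmbus_allocate_mmio() when !fb_overlap_ok. */
static bool skip_candidate(const struct range *fb, unsigned long start,
			   unsigned long size)
{
	unsigned long end = start + size - 1;

	return endpoint_in(fb, start) || endpoint_in(fb, end);
}

int main(void)
{
	struct range fb = { 0xf8000000UL, 0xf87fffffUL };

	printf("%d\n", skip_candidate(&fb, 0xf8100000UL, 0x100000)); /* 1 */
	printf("%d\n", skip_candidate(&fb, 0xf8800000UL, 0x100000)); /* 0 */
	return 0;
}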
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 61a4684fc020..81e688975c6a 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -266,9 +266,7 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
#define SENSOR_SET_WATER_BLOCK \
(SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
-
struct ec_board_info {
- const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
unsigned long sensors;
/*
* Defines which mutex to use for guarding access to the state and the
@@ -281,152 +279,194 @@ struct ec_board_info {
enum board_family family;
};
-static const struct ec_board_info board_info[] = {
- {
- .board_names = {"PRIME X470-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_400_series,
- },
- {
- .board_names = {"PRIME X570-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ProArt X570-CREATOR WIFI"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- },
- {
- .board_names = {"Pro WS X570-ACE"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII DARK HERO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG CROSSHAIR VIII FORMULA",
- "ROG CROSSHAIR VIII HERO",
- "ROG CROSSHAIR VIII HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG MAXIMUS XI HERO",
- "ROG MAXIMUS XI HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_intel_300_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII IMPACT"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-I GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING WIFI II"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-F GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-I GAMING"},
- .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"},
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
- .family = family_intel_600_series,
- },
- {
- .board_names = {"ROG ZENITH II EXTREME"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
- SENSOR_SET_WATER_BLOCK |
- SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
- SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
- .family = family_amd_500_series,
- },
- {}
+static const struct ec_board_info board_info_prime_x470_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_400_series,
+};
+
+static const struct ec_board_info board_info_prime_x570_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_maximus_xi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_f_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_i_gaming = {
+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_zenith_ii_extreme = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
+ SENSOR_SET_WATER_BLOCK |
+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_500_series,
+};
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, \
+ "ASUSTeK COMPUTER INC."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = (void *)board_info, \
+ }
+
+static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
+ &board_info_prime_x470_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
+ &board_info_prime_x570_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
+ &board_info_pro_art_x570_creator_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
+ &board_info_pro_ws_x570_ace),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
+ &board_info_crosshair_viii_dark_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+ &board_info_crosshair_viii_impact),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
+ &board_info_strix_b550_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
+ &board_info_strix_b550_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
+ &board_info_strix_x570_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
+ &board_info_strix_x570_e_gaming_wifi_ii),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING",
+ &board_info_strix_x570_f_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
+ &board_info_strix_x570_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
+ &board_info_strix_z690_a_gaming_wifi_d4),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
+ &board_info_zenith_ii_extreme),
+ {},
};
struct ec_sensor {
@@ -537,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec,
return -ENOENT;
}
-static int __init bank_compare(const void *a, const void *b)
+static int bank_compare(const void *a, const void *b)
{
return *((const s8 *)a) - *((const s8 *)b);
}
-static void __init setup_sensor_data(struct ec_sensors_data *ec)
+static void setup_sensor_data(struct ec_sensors_data *ec)
{
struct ec_sensor *s = ec->sensors;
bool bank_found;
@@ -574,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec)
sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
}
-static void __init fill_ec_registers(struct ec_sensors_data *ec)
+static void fill_ec_registers(struct ec_sensors_data *ec)
{
const struct ec_sensor_info *si;
unsigned int i, j, register_idx = 0;
@@ -589,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec)
}
}
-static int __init setup_lock_data(struct device *dev)
+static int setup_lock_data(struct device *dev)
{
const char *mutex_path;
int status;
@@ -812,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
}
-static int __init
+static int
asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
struct device *dev, int num,
enum hwmon_sensor_types type, u32 config)
@@ -841,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = {
.ops = &asus_ec_hwmon_ops,
};
-static const struct ec_board_info * __init get_board_info(void)
+static const struct ec_board_info *get_board_info(void)
{
- const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
- const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME);
- const struct ec_board_info *board;
-
- if (!dmi_board_vendor || !dmi_board_name ||
- strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC."))
- return NULL;
-
- for (board = board_info; board->sensors; board++) {
- if (match_string(board->board_names,
- MAX_IDENTICAL_BOARD_VARIATIONS,
- dmi_board_name) >= 0)
- return board;
- }
+ const struct dmi_system_id *dmi_entry;
- return NULL;
+ dmi_entry = dmi_first_match(dmi_table);
+ return dmi_entry ? dmi_entry->driver_data : NULL;
}
-static int __init asus_ec_probe(struct platform_device *pdev)
+static int asus_ec_probe(struct platform_device *pdev)
{
const struct hwmon_channel_info **ptr_asus_ec_ci;
int nr_count[hwmon_max] = { 0 }, nr_types = 0;
@@ -970,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwdev);
}
-
-static const struct acpi_device_id acpi_ec_ids[] = {
- /* Embedded Controller Device */
- { "PNP0C09", 0 },
- {}
-};
+MODULE_DEVICE_TABLE(dmi, dmi_table);
static struct platform_driver asus_ec_sensors_platform_driver = {
.driver = {
.name = "asus-ec-sensors",
- .acpi_match_table = acpi_ec_ids,
},
+ .probe = asus_ec_probe,
};
-MODULE_DEVICE_TABLE(acpi, acpi_ec_ids);
-/*
- * we use module_platform_driver_probe() rather than module_platform_driver()
- * because the probe function (and its dependants) are marked with __init, which
- * means we can't put it into the .probe member of the platform_driver struct
- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init
- * because the object is referenced from the module exit code.
- */
-module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+static struct platform_device *asus_ec_sensors_platform_device;
+
+static int __init asus_ec_init(void)
+{
+ asus_ec_sensors_platform_device =
+ platform_create_bundle(&asus_ec_sensors_platform_driver,
+ asus_ec_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(asus_ec_sensors_platform_device))
+ return PTR_ERR(asus_ec_sensors_platform_device);
+
+ return 0;
+}
+
+static void __exit asus_ec_exit(void)
+{
+ platform_device_unregister(asus_ec_sensors_platform_device);
+ platform_driver_unregister(&asus_ec_sensors_platform_driver);
+}
+
+module_init(asus_ec_init);
+module_exit(asus_ec_exit);
module_param_named(mutex_path, mutex_path_override, charp, 0);
MODULE_PARM_DESC(mutex_path,
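The conversion above hangs all board data off a dmi_system_id table: each entry matches vendor plus exact board name and carries its ec_board_info in .driver_data, which dmi_first_match() hands back for the first matching entry. A hedged standalone sketch of the idiom (vendor, board, and the data struct are made up):

#include <linux/dmi.h>
#include <linux/module.h>

struct board_data {
	unsigned long sensors;
};

static const struct board_data example_board = { .sensors = 0x3 };

static const struct dmi_system_id example_dmi_table[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "EXAMPLE VENDOR"),
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "EXAMPLE BOARD"),
		},
		.driver_data = (void *)&example_board,
	},
	{}
};
MODULE_DEVICE_TABLE(dmi, example_dmi_table);

static const struct board_data *get_board_data(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_dmi_table);

	return id ? id->driver_data : NULL;
}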
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index befe989ca7b9..fbf3f5a4ecb6 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (!fan_data)
return -EINVAL;
+ if (state >= fan_data->num_speed)
+ return -EINVAL;
+
set_fan_speed(fan_data, state);
return 0;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 03d07da8c2dc..221de01a327a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2321,7 +2321,7 @@ static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
const char *name = NULL;
if (config2 < 0)
- return ERR_PTR(-ENODEV);
+ return NULL;
if (address == 0x4c && !(config1 & 0x2a) && !(config2 & 0xf8)) {
if (chip_id == 0x01 && convrate <= 0x09) {
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 26278b0f17a9..9259779cc2df 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -68,8 +68,9 @@
/* VM Individual Macro Register */
#define VM_COM_REG_SIZE 0x200
-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
+#define VM_SDIF_DATA(vm, ch) \
+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
/* SDA Slave Register */
#define IP_CTRL 0x00
@@ -115,6 +116,7 @@ struct pvt_device {
u32 t_num;
u32 p_num;
u32 v_num;
+ u32 c_num;
u32 ip_freq;
u8 *vm_idx;
};
@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *v_map = pvt->v_map;
+ u8 vm_idx, ch_idx;
u32 n, stat;
- u8 vm_idx;
int ret;
- if (channel >= pvt->v_num)
+ if (channel >= pvt->v_num * pvt->c_num)
return -EINVAL;
- vm_idx = pvt->vm_idx[channel];
+ vm_idx = pvt->vm_idx[channel / pvt->c_num];
+ ch_idx = channel % pvt->c_num;
switch (attr) {
case hwmon_in_input:
@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
if (ret)
return ret;
- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
if(ret < 0)
return ret;
n &= SAMPLE_DATA_MSK;
- /* Convert the N bitstream count into voltage */
- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+ /*
+ * Convert the N bitstream count into voltage.
+ * To support negative voltage calculation for 64bit machines
+ * n must be cast to long, since n and *val differ both in
+ * signedness and in size.
+ * Division is used instead of right shift, because for signed
+ * numbers, the sign bit is used to fill the vacated bit
+ * positions, and if the number is negative, 1 is used.
+ * BIT(x) may not be used instead of (1 << x) because it's
+ * unsigned.
+ */
+ *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
return 0;
default:
@@ -375,6 +388,19 @@ static int pvt_init(struct pvt_device *pvt)
if (ret)
return ret;
+ val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
@@ -489,8 +515,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
static int mr75203_probe(struct platform_device *pdev)
{
+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
const struct hwmon_channel_info **pvt_info;
- u32 ts_num, vm_num, pd_num, val, index, i;
struct device *dev = &pdev->dev;
u32 *temp_config, *in_config;
struct device *hwmon_dev;
@@ -531,9 +557,11 @@ static int mr75203_probe(struct platform_device *pdev)
ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
pvt->t_num = ts_num;
pvt->p_num = pd_num;
pvt->v_num = vm_num;
+ pvt->c_num = ch_num;
val = 0;
if (ts_num)
val++;
@@ -570,7 +598,7 @@ static int mr75203_probe(struct platform_device *pdev)
}
if (vm_num) {
- u32 num = vm_num;
+ u32 total_ch;
ret = pvt_get_regmap(pdev, "vm", pvt);
if (ret)
@@ -584,30 +612,30 @@ static int mr75203_probe(struct platform_device *pdev)
ret = device_property_read_u8_array(dev, "intel,vm-map",
pvt->vm_idx, vm_num);
if (ret) {
- num = 0;
+ /*
+ * In case the intel,vm-map property is not defined, we
+ * assume incremental channel numbers.
+ */
+ for (i = 0; i < vm_num; i++)
+ pvt->vm_idx[i] = i;
} else {
for (i = 0; i < vm_num; i++)
if (pvt->vm_idx[i] >= vm_num ||
pvt->vm_idx[i] == 0xff) {
- num = i;
+ pvt->v_num = i;
+ vm_num = i;
break;
}
}
- /*
- * Incase intel,vm-map property is not defined, we assume
- * incremental channel numbers.
- */
- for (i = num; i < vm_num; i++)
- pvt->vm_idx[i] = i;
-
- in_config = devm_kcalloc(dev, num + 1,
+ total_ch = ch_num * vm_num;
+ in_config = devm_kcalloc(dev, total_ch + 1,
sizeof(*in_config), GFP_KERNEL);
if (!in_config)
return -ENOMEM;
- memset32(in_config, HWMON_I_INPUT, num);
- in_config[num] = 0;
+ memset32(in_config, HWMON_I_INPUT, total_ch);
+ in_config[total_ch] = 0;
pvt_in.config = in_config;
pvt_info[index++] = &pvt_in;
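The long comment about division versus right shift is worth a concrete check: on two's-complement machines an arithmetic right shift of a negative value rounds toward negative infinity, while C integer division truncates toward zero, and the kernel's BIT(x) is unsigned so dividing a signed value by it would promote the operand. A runnable illustration:

#include <stdio.h>

int main(void)
{
	long n = -7;

	/* Arithmetic shift fills vacated bits with the sign bit. */
	printf("-7 >> 1 = %ld\n", n >> 1); /* -4: rounds toward -inf */

	/* C99 integer division truncates toward zero. */
	printf("-7 / 2  = %ld\n", n / 2); /* -3 */

	/* (1 << x) stays signed, so the quotient keeps its sign cleanly. */
	printf("-32 / (1 << 2) = %ld\n", -32L / (1 << 2)); /* -8 */
	return 0;
}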
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 446964cbae4c..da9ec6983e13 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -1480,7 +1480,7 @@ static int nct6775_update_pwm_limits(struct device *dev)
return 0;
}
-static struct nct6775_data *nct6775_update_device(struct device *dev)
+struct nct6775_data *nct6775_update_device(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
int i, j, err = 0;
@@ -1615,6 +1615,7 @@ out:
mutex_unlock(&data->update_lock);
return err ? ERR_PTR(err) : data;
}
+EXPORT_SYMBOL_GPL(nct6775_update_device);
/*
* Sysfs callback functions
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index ab30437221ce..41c97cfacfb8 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -359,7 +359,7 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
{
int err;
u16 tmp;
- struct nct6775_data *data = dev_get_drvdata(dev);
+ struct nct6775_data *data = nct6775_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index 93f708148e65..be41848c3cd2 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -196,6 +196,8 @@ static inline int nct6775_write_value(struct nct6775_data *data, u16 reg, u16 va
return regmap_write(data->regmap, reg, value);
}
+struct nct6775_data *nct6775_update_device(struct device *dev);
+
bool nct6775_reg_is_word_sized(struct nct6775_data *data, u16 reg);
int nct6775_probe(struct device *dev, struct nct6775_data *data,
const struct regmap_config *regmapcfg);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f10bac8860fc..81d3f91dd204 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2861,7 +2861,7 @@ static int pmbus_regulator_get_low_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_low[page]) {
+ if (data->vout_low[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MIN))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MIN);
@@ -2887,7 +2887,7 @@ static int pmbus_regulator_get_high_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_high[page]) {
+ if (data->vout_high[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MAX))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MAX);
@@ -3016,11 +3016,10 @@ static int pmbus_regulator_register(struct pmbus_data *data)
rdev = devm_regulator_register(dev, &info->reg_desc[i],
&config);
- if (IS_ERR(rdev)) {
- dev_err(dev, "Failed to register %s regulator\n",
- info->reg_desc[i].name);
- return PTR_ERR(rdev);
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ info->reg_desc[i].name);
}
return 0;
@@ -3320,6 +3319,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
struct pmbus_data *data;
size_t groups_num = 0;
int ret;
+ int i;
char *name;
if (!info)
@@ -3353,6 +3353,11 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
data->currpage = -1;
data->currphase = -1;
+ for (i = 0; i < ARRAY_SIZE(data->vout_low); i++) {
+ data->vout_low[i] = -1;
+ data->vout_high[i] = -1;
+ }
+
ret = pmbus_init_common(client, data, info);
if (ret < 0)
return ret;
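dev_err_probe() logs and returns the error in one statement, and for -EPROBE_DEFER it logs at debug level and records the deferral reason instead of spamming the console, which is why it can replace the dev_err() + return pair removed above. A sketch of the idiom under the same assumptions as the patch (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>

static int register_one_regulator(struct device *dev,
				  const struct regulator_desc *desc,
				  const struct regulator_config *config)
{
	struct regulator_dev *rdev;

	rdev = devm_regulator_register(dev, desc, config);
	if (IS_ERR(rdev))
		return dev_err_probe(dev, PTR_ERR(rdev),
				     "Failed to register %s regulator\n",
				     desc->name);
	return 0;
}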
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 42762e87b014..f7c59ff7ae8e 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -493,18 +493,20 @@ static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
static int tps23861_port_resistance(struct tps23861_data *data, int port)
{
- u16 regval;
+ unsigned int raw_val;
+ __le16 regval;
regmap_bulk_read(data->regmap,
PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
&regval,
2);
- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ raw_val = le16_to_cpu(regval);
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
case PORT_RESISTANCE_RSN_OTHER:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
case PORT_RESISTANCE_RSN_LOW:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
case PORT_RESISTANCE_RSN_SHORT:
case PORT_RESISTANCE_RSN_OPEN:
default:
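The tps23861 change works because regmap_bulk_read() fills the buffer in bus order: keeping it in a __le16 and converting with le16_to_cpu() gives FIELD_GET() a CPU-endian value on both little- and big-endian hosts. A hedged sketch of the decode step (the masks are illustrative, not the chip's register layout):

#include <linux/bitfield.h>
#include <linux/types.h>

#define EXAMPLE_RSN_MASK GENMASK(15, 13) /* illustrative field masks */
#define EXAMPLE_VAL_MASK GENMASK(12, 0)

static unsigned int decode_resistance(__le16 raw)
{
	unsigned int val = le16_to_cpu(raw); /* bus order -> CPU order */

	if (FIELD_GET(EXAMPLE_RSN_MASK, val) != 0)
		return 0; /* fault/status code, no resistance sample */

	return FIELD_GET(EXAMPLE_VAL_MASK, val);
}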
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 33eeff94fc2a..1fb3a2550e29 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -94,11 +94,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
* the module SYSSTATUS register
*/
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
goto runtime_err;
- }
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 364710966665..80ea45b3a815 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -19,6 +19,11 @@
#define QCOM_MUTEX_APPS_PROC_ID 1
#define QCOM_MUTEX_NUM_LOCKS 32
+struct qcom_hwspinlock_of_data {
+ u32 offset;
+ u32 stride;
+};
+
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
{
struct regmap_field *field = lock->priv;
@@ -63,9 +68,20 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};
+static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
+ .offset = 0x4,
+ .stride = 0x4,
+};
+
+/* All modern platforms have offset 0 and a stride of 4k */
+static const struct qcom_hwspinlock_of_data of_tcsr_mutex = {
+ .offset = 0,
+ .stride = 0x1000,
+};
+
static const struct of_device_id qcom_hwspinlock_of_match[] = {
- { .compatible = "qcom,sfpb-mutex" },
- { .compatible = "qcom,tcsr-mutex" },
+ { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
+ { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
@@ -112,12 +128,14 @@ static const struct regmap_config tcsr_mutex_config = {
static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
u32 *offset, u32 *stride)
{
+ const struct qcom_hwspinlock_of_data *data;
struct device *dev = &pdev->dev;
void __iomem *base;
- /* All modern platform has offset 0 and stride of 4k */
- *offset = 0;
- *stride = 0x1000;
+ data = of_device_get_match_data(dev);
+
+ *offset = data->offset;
+ *stride = data->stride;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
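Routing the offset/stride constants through match data is the standard OF pattern: each of_device_id entry carries a .data pointer, and of_device_get_match_data() returns the one belonging to the compatible that matched. A minimal sketch (compatible strings and the layout struct are hypothetical):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct hw_layout {
	u32 offset;
	u32 stride;
};

static const struct hw_layout layout_a = { .offset = 0x4, .stride = 0x4 };
static const struct hw_layout layout_b = { .offset = 0, .stride = 0x1000 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,block-a", .data = &layout_a },
	{ .compatible = "vendor,block-b", .data = &layout_b },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct hw_layout *l = of_device_get_match_data(&pdev->dev);

	if (!l)
		return -ENODEV;
	/* use l->offset and l->stride to set up the regmap fields */
	return 0;
}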
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index cf249ecad5a5..d39660a3e50c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -98,7 +98,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -106,7 +106,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -130,7 +130,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -138,7 +138,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index a7bfea31f7d8..4b21bb79f168 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -547,14 +547,14 @@
#define etm4x_read32(csa, offset) \
({ \
u32 __val = etm4x_relaxed_read32((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
#define etm4x_read64(csa, offset) \
({ \
u64 __val = etm4x_relaxed_read64((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
@@ -578,13 +578,13 @@
#define etm4x_write32(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write32((csa), (val), (offset)); \
} while (0)
#define etm4x_write64(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write64((csa), (val), (offset)); \
} while (0)
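__io_ar()/__io_bw() are the asm-generic hooks from which the ordered readl()/writel() accessors are built, so using them here makes these private macros mirror the generic construction instead of the arm64-only __iormb()/__iowmb(). A hedged sketch of the wrapper shape (simplified from the asm-generic/io.h pattern; not the coresight code itself):

#include <linux/io.h>

static inline u32 ordered_read32(const volatile void __iomem *addr)
{
	u32 val = readl_relaxed(addr); /* no ordering by itself */

	__io_ar(val); /* order the read against later accesses */
	return val;
}

static inline void ordered_write32(u32 val, volatile void __iomem *addr)
{
	__io_bw(); /* order prior writes before the MMIO write */
	writel_relaxed(val, addr);
}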
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 354cf7e45c4a..50e7f3f670b6 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -447,7 +447,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
mutex_unlock(&idev->isr_mutex);
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &altr_i2c_algo;
idev->adapter.dev.parent = &pdev->dev;
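The strlcpy() -> strscpy() conversions running through these i2c drivers are mechanical: same (dst, src, size) arguments, but strscpy() never reads past a non-terminated source, always NUL-terminates, and returns -E2BIG on truncation rather than the would-be source length. A short sketch of acting on that return value (the helper is hypothetical):

#include <linux/errno.h>
#include <linux/string.h>

static int copy_adapter_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	if (n == -E2BIG)
		return -E2BIG; /* truncated; dst is still NUL-terminated */

	return 0; /* n holds the number of characters copied */
}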
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 771e53d3d197..185dedfebbac 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -1022,7 +1022,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->adap.algo = &aspeed_i2c_algo;
bus->adap.dev.parent = &pdev->dev;
bus->adap.dev.of_node = pdev->dev.of_node;
- strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
+ strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
i2c_set_adapdata(&bus->adap, bus);
bus->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 22aed922552b..99bd24d0e6a5 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -321,7 +321,7 @@ i2c_au1550_probe(struct platform_device *pdev)
priv->adap.algo = &au1550_algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &pdev->dev;
- strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
/* Now, set up the PSC for SMBus PIO mode. */
i2c_au1550_setup(priv);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 5294b73beca8..bdf3b50de8ad 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -783,7 +783,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
}
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &axxia_i2c_algo;
idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info;
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 16bf41f1f086..f3e369f0fd40 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -839,7 +839,7 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
adap->algo = &bcm_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 2ae187e2b642..69383be47905 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -674,7 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
adap->algo = &brcmstb_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index f8639a4457d2..d97c61eec95c 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -245,7 +245,7 @@ static int cbus_i2c_probe(struct platform_device *pdev)
adapter->nr = pdev->id;
adapter->timeout = HZ;
adapter->algo = &cbus_i2c_algo;
- strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+ strscpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
spin_lock_init(&chost->lock);
chost->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index de15f09c9b47..190abdc46dd3 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -404,7 +404,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
adap->adapter.class = I2C_CLASS_HWMON;
adap->adapter.algo = &cht_wc_i2c_adap_algo;
adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
- strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+ strscpy(adap->adapter.name, "PMIC I2C Adapter",
sizeof(adap->adapter.name));
adap->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 892213d51f43..4e787dc709f9 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -267,7 +267,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->dev = dev;
bus->adap.owner = THIS_MODULE;
- strlcpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
+ strscpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 9e09db31a937..471c47db546b 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -845,7 +845,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 60c838c7c454..50925d97fa42 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -322,7 +322,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
+ strscpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &dc_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 321b2770feab..4914bfbee2a9 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,7 +773,7 @@ static int pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
+ strscpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index bdff0e6345d9..f2e537b137b2 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -371,7 +371,7 @@ static int em_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- strlcpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
priv->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(priv->sclk))
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b812d1090c0f..4a6260d04db2 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -802,7 +802,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock))
i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ;
- strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &exynos5_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 7a048abbf92b..b1985c1667e1 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -436,7 +436,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
if (np)
- strlcpy(adap->name, dev_name(dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index a2add128d084..4374a8677271 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -402,7 +402,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON;
- strlcpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
adap->algo = &highlander_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 61ae58f57047..0e34cbaca22d 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -423,7 +423,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->clk);
- strlcpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
priv->dev = &pdev->dev;
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &hix5hd2_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 81d0da2547bd..a176296f4fff 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1116,7 +1116,7 @@ static void dmi_check_onboard_device(u8 type, const char *name,
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dmi_devices[i].i2c_addr;
- strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
+ strscpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
i2c_new_client_device(adap, &info);
break;
}
@@ -1267,7 +1267,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
i2c_new_client_device(&priv->adapter, &info);
}
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 9f71daf6db64..eeb80e34f9ad 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -738,7 +738,7 @@ static int iic_probe(struct platform_device *ofdev)
adap = &dev->adap;
adap->dev.parent = &ofdev->dev;
adap->dev.of_node = of_node_get(np);
- strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
+ strscpy(adap->name, "IBM IIC", sizeof(adap->name));
i2c_set_adapdata(adap, dev);
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &iic_algo;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 5dae7cab7260..febcb6f01d4d 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -141,7 +141,7 @@ static int icy_probe(struct zorro_dev *z,
i2c->adapter.owner = THIS_MODULE;
/* i2c->adapter.algo assigned by i2c_pcf_add_bus() */
i2c->adapter.algo_data = algo_data;
- strlcpy(i2c->adapter.name, "ICY I2C Zorro adapter",
+ strscpy(i2c->adapter.name, "ICY I2C Zorro adapter",
sizeof(i2c->adapter.name));
if (!devm_request_mem_region(&z->dev,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8b9ba055c418..b51ab3cad2b1 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -558,7 +558,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
lpi2c_imx->adapter.dev.parent = &pdev->dev;
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
- strlcpy(lpi2c_imx->adapter.name, pdev->name,
+ strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 78fb1a4274a6..e47fa3465671 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
hrtimer_cancel(&i2c_imx->slave_timer);
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret == 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
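The i2c-imx remove path switches back to pm_runtime_get_sync() because, unlike pm_runtime_resume_and_get(), it keeps the usage-count reference even when the resume fails, so teardown can proceed and the hardware is only touched when ret reports success. A hedged sketch of the pattern (device-specific callbacks elided):

#include <linux/pm_runtime.h>

static void example_remove(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev); /* reference held even on error */

	if (ret >= 0) {
		/* device resumed: safe to reset registers here */
	}

	/* teardown that must run regardless of resume success */
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
}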
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 5bbb7f0d7852..cf857cf22507 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -303,6 +303,7 @@ static int kempld_i2c_probe(struct platform_device *pdev)
i2c->dev = &pdev->dev;
i2c->adap = kempld_i2c_adapter;
i2c->adap.dev.parent = i2c->dev;
+ ACPI_COMPANION_SET(&i2c->adap.dev, ACPI_COMPANION(&pdev->dev));
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 4e30c5267142..8fff6fbb7065 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -417,7 +417,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo = &i2c_lpc2k_algorithm;
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 61cc5b2462c6..889eff06b78f 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -502,7 +502,7 @@ static int meson_i2c_probe(struct platform_device *pdev)
return ret;
}
- strlcpy(i2c->adap.name, "Meson I2C adapter",
+ strscpy(i2c->adap.name, "Meson I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &meson_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 6df0f1c33278..4d7e9b25f018 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -206,7 +206,7 @@ static void mchp_corei2c_empty_rx(struct mchp_corei2c_dev *idev)
idev->msg_len--;
}
- if (idev->msg_len == 0) {
+ if (idev->msg_len <= 1) {
ctrl = readb(idev->base + CORE_I2C_CTRL);
ctrl &= ~CTRL_AA;
writeb(ctrl, idev->base + CORE_I2C_CTRL);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 6c698c10d3cd..81ac92bb4f6f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -239,6 +239,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
u32 *real_clk)
{
+ struct fwnode_handle *fwnode = of_fwnode_handle(node);
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
@@ -246,12 +247,12 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
if (clock == MPC_I2C_CLOCK_LEGACY) {
/* see below - default fdr = 0x3f -> div = 2048 */
- *real_clk = mpc5xxx_get_bus_frequency(node) / 2048;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / 2048;
return -EINVAL;
}
/* Determine divider value */
- divider = mpc5xxx_get_bus_frequency(node) / clock;
+ divider = mpc5xxx_fwnode_get_bus_frequency(fwnode) / clock;
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
@@ -266,7 +267,7 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
break;
}
- *real_clk = mpc5xxx_get_bus_frequency(node) / div->divider;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / div->divider;
return (int)div->fdr;
}
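
The i2c-mpc hunks above are part of converting mpc5xxx_get_bus_frequency() to take a struct fwnode_handle; of_fwnode_handle() wraps a struct device_node in the generic fwnode so OF and non-OF callers can share one lookup. A one-line sketch of the bridge (return type assumed to fit in u32):

static u32 bus_freq_of(struct device_node *np)
{
	/* of_fwnode_handle() lifts an OF node into the fwnode API. */
	return mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
}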
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 8e6985354fd5..fc7bfd98156b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -229,6 +229,35 @@ static const u16 mt_i2c_regs_v2[] = {
[OFFSET_DCM_EN] = 0xf88,
};
+static const u16 mt_i2c_regs_v3[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_LTIMING] = 0x2c,
+ [OFFSET_HS] = 0x30,
+ [OFFSET_IO_CONFIG] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_SDA_TIMING] = 0x3c,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x44,
+ [OFFSET_CLOCK_DIV] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_MULTI_DMA] = 0x8c,
+ [OFFSET_SCL_MIS_COMP_POINT] = 0x90,
+ [OFFSET_SLAVE_ADDR] = 0x94,
+ [OFFSET_DEBUGSTAT] = 0xe4,
+ [OFFSET_DEBUGCTRL] = 0xe8,
+ [OFFSET_FIFO_STAT] = 0xf4,
+ [OFFSET_FIFO_THRESH] = 0xf8,
+ [OFFSET_DCM_EN] = 0xf88,
+};
+
struct mtk_i2c_compatible {
const struct i2c_adapter_quirks *quirks;
const u16 *regs;
@@ -442,6 +471,19 @@ static const struct mtk_i2c_compatible mt8186_compat = {
.max_dma_support = 36,
};
+static const struct mtk_i2c_compatible mt8188_compat = {
+ .regs = mt_i2c_regs_v3,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .timing_adjust = 1,
+ .dma_sync = 0,
+ .ltiming_adjust = 1,
+ .apdma_sync = 1,
+ .max_dma_support = 36,
+};
+
static const struct mtk_i2c_compatible mt8192_compat = {
.quirks = &mt8183_i2c_quirks,
.regs = mt_i2c_regs_v2,
@@ -465,6 +507,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
{ .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
{ .compatible = "mediatek,mt8186-i2c", .data = &mt8186_compat },
+ { .compatible = "mediatek,mt8188-i2c", .data = &mt8188_compat },
{ .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat },
{}
};
@@ -1389,7 +1432,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
speed_clk = I2C_MT65XX_CLK_MAIN;
}
- strlcpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (ret) {
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index cfe6de8175dd..20eda5738ac4 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -312,7 +312,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
i2c_set_adapdata(adap, i2c);
adap->dev.of_node = pdev->dev.of_node;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 103a05ecc3d6..047dfef7a657 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -989,7 +989,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
if (IS_ERR(drv_data->reg_base))
return PTR_ERR(drv_data->reg_base);
- strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
+ strscpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
sizeof(drv_data->adapter.name));
init_waitqueue_head(&drv_data->waitq);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 68f67d084c63..5af5cffc444e 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -838,7 +838,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
adap = &i2c->adapter;
- strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &mxs_i2c_algo;
adap->quirks = &mxs_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 6920c1b9a126..12e330cd7635 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -299,7 +299,7 @@ static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
i2c_set_adapdata(&i2cd->adapter, i2cd);
i2cd->adapter.owner = THIS_MODULE;
- strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ strscpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
sizeof(i2cd->adapter.name));
i2cd->adapter.algo = &gpu_i2c_algorithm;
i2cd->adapter.quirks = &gpu_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d4f6c6d60683..f9ae520aed22 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1488,7 +1488,7 @@ omap_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, omap);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->quirks = &omap_i2c_quirks;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 6eb0f50c5d28..9f773b4f5ed8 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -220,9 +220,9 @@ static int i2c_opal_probe(struct platform_device *pdev)
adapter->dev.of_node = of_node_get(pdev->dev.of_node);
pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
if (pname)
- strlcpy(adapter->name, pname, sizeof(adapter->name));
+ strscpy(adapter->name, pname, sizeof(adapter->name));
else
- strlcpy(adapter->name, "opal", sizeof(adapter->name));
+ strscpy(adapter->name, "opal", sizeof(adapter->name));
platform_set_drvdata(pdev, adapter);
rc = i2c_add_adapter(adapter);
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 231145c48728..0af86a542568 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -308,7 +308,7 @@ static void i2c_parport_attach(struct parport *port)
/* Fill the rest of the structure */
adapter->adapter.owner = THIS_MODULE;
adapter->adapter.class = I2C_CLASS_HWMON;
- strlcpy(adapter->adapter.name, "Parallel port adapter",
+ strscpy(adapter->adapter.name, "Parallel port adapter",
sizeof(adapter->adapter.name));
adapter->algo_data = parport_algo_data;
/* Slow down if we can't sense SCL */
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 690188a9ffff..b605b6e43cb9 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1403,7 +1403,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(i2c->clk)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 6ac179a373ff..84a77512614d 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -494,12 +494,12 @@ static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
{
if (tx_buf) {
dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
- i2c_put_dma_safe_msg_buf(tx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
}
if (rx_buf) {
dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
- i2c_put_dma_safe_msg_buf(rx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
}
}
@@ -563,6 +563,7 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
desc->callback_param = gi2c;
dmaengine_submit(desc);
+ *buf = dma_buf;
*dma_addr_p = addr;
return 0;
@@ -816,7 +817,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
- strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
ret = geni_icc_get(&gi2c->se, "qup-memory");
if (ret)
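
The geni-i2c change above passes !gi2c->err as the 'xferred' flag so that, when a bounce buffer was used, its contents are copied back into the message only if the transfer actually succeeded. The helper pair is used roughly like this (sketch; do_hw_transfer() and the 32-byte threshold are illustrative):

static int xfer_one(struct i2c_msg *msg)
{
	u8 *dma_buf;
	int err;

	/* Returns msg->buf if DMA-safe, else a bounce buffer (or NULL). */
	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
	if (!dma_buf)
		return -ENOMEM;

	err = do_hw_transfer(dma_buf, msg->len);	/* hypothetical */

	/* Copy bounce contents back into msg->buf only on success. */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, !err);
	return err;
}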
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 69e9f3ecf87d..2e153f2f71b6 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1878,7 +1878,7 @@ nodma:
qup->adap.dev.of_node = pdev->dev.of_node;
qup->is_last = true;
- strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
+ strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(qup->dev);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 6e7be9d9f504..cef82b205c26 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1076,7 +1076,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->bus_recovery_info = &rcar_i2c_bri;
adap->quirks = &rcar_i2c_quirks;
i2c_set_adapdata(adap, priv);
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
/* Init DMA */
sg_init_table(&priv->sg, 1);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index cded77e06670..ecba1dfc1278 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -448,7 +448,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
- strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
+ strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 989040a73626..2e98e7793bba 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1240,7 +1240,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
/* use common interface to get I2C timing properties */
i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
- strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &rk3x_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b49a1b170bb2..36dab9cd208c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1076,7 +1076,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
else
s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
- strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 79798fc7462a..6746aa46d96c 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -30,7 +30,7 @@ struct acpi_smbus_cmi {
u8 cap_info:1;
u8 cap_read:1;
u8 cap_write:1;
- const struct smbus_methods_t *methods;
+ struct smbus_methods_t *methods;
};
static const struct smbus_methods_t smbus_methods = {
@@ -361,6 +361,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
+ const struct acpi_device_id *id;
int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
@@ -368,7 +369,6 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
return -ENOMEM;
smbus_cmi->handle = device->handle;
- smbus_cmi->methods = device_get_match_data(&device->dev);
strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
device->driver_data = smbus_cmi;
@@ -376,6 +376,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
+ for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, acpi_device_hid(device)))
+ smbus_cmi->methods =
+ (struct smbus_methods_t *) id->driver_data;
+
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 72f024a0c363..29330ee64c9c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -940,7 +940,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->nr = dev->id;
adap->dev.of_node = dev->dev.of_node;
- strlcpy(adap->name, dev->name, sizeof(adap->name));
+ strscpy(adap->name, dev->name, sizeof(adap->name));
spin_lock_init(&pd->lock);
init_waitqueue_head(&pd->wait);
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 458c7bcf1d24..87701744752f 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -99,7 +99,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
pd->adap.algo_data = &pd->bit;
pd->adap.dev.parent = &dev->dev;
- strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
+ strscpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
pd->bit.data = pd;
pd->bit.setsda = simtec_i2c_setsda;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index b4050f5b6746..b0f0120793e1 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -239,7 +239,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
dev_err(&serio->dev, "TAOS EVM identification failed\n");
goto exit_close;
}
- strlcpy(adapter->name, name, sizeof(adapter->name));
+ strscpy(adapter->name, name, sizeof(adapter->name));
/* Turn echo off for better performance */
taos->state = TAOS_STATE_EOFF;
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec0c7cad4240..95139985b2d5 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -305,7 +305,7 @@ static int tegra_bpmp_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.owner = THIS_MODULE;
- strlcpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
+ strscpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
sizeof(i2c->adapter.name));
i2c->adapter.algo = &tegra_bpmp_i2c_algo;
i2c->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2941e42aa6a0..031c78ac42e6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1825,7 +1825,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (i2c_dev->hw->supports_bus_clear)
i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info;
- strlcpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
+ strscpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
sizeof(i2c_dev->adapter.name));
err = i2c_add_numbered_adapter(&i2c_dev->adapter);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index cb4666c54a23..d7b622891e52 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -564,7 +564,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_fi2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_fi2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index ee00a44bf4c7..e3ebae381f08 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -358,7 +358,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_i2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_i2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 8d980b1374a8..1ab419f8fa52 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -79,7 +79,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
writel(SCL | SDA, i2c->base + I2C_CONTROLS);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo_data = &i2c->algo;
i2c->adap.dev.parent = &dev->dev;
i2c->adap.dev.of_node = dev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 88f5aafdce5b..7d4bc8736079 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -413,7 +413,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adapter;
i2c_set_adapdata(adap, i2c_dev);
- strlcpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &wmt_i2c_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 10f35f942066..91007558bcb2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -933,7 +933,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->init_irq = i2c_dev_irq_from_resources(info->resources,
info->num_resources);
- strlcpy(client->name, info->type, sizeof(client->name));
+ strscpy(client->name, info->type, sizeof(client->name));
status = i2c_check_addr_validity(client->addr, client->flags);
if (status) {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 775332945ad0..8ba9b59a3c40 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -391,7 +391,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
unsigned short addr_list[2];
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, name, I2C_NAME_SIZE);
+ strscpy(info.type, name, I2C_NAME_SIZE);
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 92c68d467c50..a2f9fda25ff3 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -287,10 +287,8 @@ static int ad7292_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev,
ad7292_regulator_disable, st);
- if (ret) {
- regulator_disable(st->reg);
+ if (ret)
return ret;
- }
ret = regulator_get_voltage(st->reg);
if (ret < 0)
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 1cb4590fe412..890af7dca62d 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -40,8 +40,8 @@
#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
-/* Internal voltage reference in uV */
-#define MCP3911_INT_VREF_UV 1200000
+/* Internal voltage reference in mV */
+#define MCP3911_INT_VREF_MV 1200
#define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
@@ -113,6 +113,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
if (ret)
goto out;
+ *val = sign_extend32(*val, 23);
+
ret = IIO_VAL_INT;
break;
@@ -137,11 +139,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
*val = ret / 1000;
} else {
- *val = MCP3911_INT_VREF_UV;
+ *val = MCP3911_INT_VREF_MV;
}
- *val2 = 24;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * For 24-bit conversion:
+ * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
+ * Voltage = Raw * Vref / (2^23 * Gain * 1.5)
+ */
+
+ /* val2 = (2^23 * 1.5) */
+ *val2 = 12582912;
+ ret = IIO_VAL_FRACTIONAL;
break;
}
@@ -208,7 +217,14 @@ static int mcp3911_config(struct mcp3911 *adc)
u32 configreg;
int ret;
- device_property_read_u32(dev, "device-addr", &adc->dev_addr);
+ ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
+
+ /*
+ * Fall back to "device-addr" due to a historical mismatch
+ * between the dt-bindings and the implementation.
+ */
+ if (ret)
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
if (adc->dev_addr > 3) {
dev_err(&adc->spi->dev,
"invalid device address (%i). Must be in range 0-3.\n",
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index edbe6a3138d0..001055d09750 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -505,7 +505,7 @@ static int cm32181_resume(struct device *dev)
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
}
-DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
static const struct of_device_id cm32181_of_match[] = {
{ .compatible = "capella,cm3218" },
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index c721b69d5095..0b30db77f78b 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -226,8 +226,10 @@ static int cm3605_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return dev_err_probe(dev, irq, "failed to get irq\n");
+ if (irq < 0) {
+ ret = dev_err_probe(dev, irq, "failed to get irq\n");
+ goto out_disable_aset;
+ }
ret = devm_request_threaded_irq(dev, irq, cm3605_prox_irq,
NULL, 0, "cm3605", indio_dev);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 46d06678dfbe..be317f2665a9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1841,8 +1841,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4d98f931a13d..8367974b7998 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -274,33 +274,6 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return 1;
}
-static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
- u32 sg_cnt, enum dma_data_direction dir)
-{
- if (is_pci_p2pdma_page(sg_page(sg)))
- pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
- else
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
-}
-
-static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- int nents;
-
- if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
- if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
- return 0;
- nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
- sgt->orig_nents, dir);
- if (!nents)
- return -EIO;
- sgt->nents = nents;
- return 0;
- }
- return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
-}
-
/**
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
* @ctx: context to initialize
@@ -327,7 +300,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
};
int ret;
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
sg_cnt = sgt.nents;
@@ -366,7 +339,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
return ret;
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -414,12 +387,12 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
if (prot_sg_cnt) {
- ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
if (ret)
goto out_unmap_sg;
}
@@ -486,9 +459,9 @@ out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
if (prot_sgt.nents)
- rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -621,7 +594,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
break;
}
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
@@ -649,8 +622,8 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
kfree(ctx->reg);
if (prot_sg_cnt)
- rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
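
The rw.c hunks above delete the driver-local P2PDMA branches because the DMA-mapping core now recognizes PCI P2PDMA pages inside dma_map_sgtable(), so the RDMA R/W path can call the generic helpers unconditionally. A condensed sketch of the resulting shape, mirroring the calls in the diff:

static int map_rw_sg(struct ib_device *dev, struct scatterlist *sg,
		     u32 sg_cnt, enum dma_data_direction dir,
		     struct sg_table *sgt)
{
	int ret;

	sgt->sgl = sg;
	sgt->orig_nents = sg_cnt;

	/* Handles both host memory and PCI P2PDMA pages internally. */
	ret = ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
	if (ret)
		return ret;

	/* ... consume sgt->nents mapped entries, then on teardown: */
	ib_dma_unmap_sgtable_attrs(dev, sgt, dir, 0);
	return 0;
}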
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index fce80a4a5147..04c04e6d24c3 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
struct scatterlist *sg;
unsigned long start, end, cur = 0;
unsigned int nmap = 0;
+ long ret;
int i;
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
* may be not up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
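
The umem_dmabuf fix above exists because dma_resv_wait_timeout() has three-way semantics on its long return: negative on error, zero when the wait timed out, positive (the remaining timeout) on success. Returning that value unmodified from a 0/-errno function turned a timeout into apparent success and a successful wait into an apparent error. The translation, in sketch form:

static int wait_for_fences(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;		/* real error */
	if (ret == 0)
		return -ETIMEDOUT;	/* fences did not signal in time */
	return 0;			/* all fences signaled */
}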
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 186ed8859920..d39e16c211e8 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -462,7 +462,7 @@ retry:
mutex_unlock(&umem_odp->umem_mutex);
out_put_mm:
- mmput(owning_mm);
+ mmput_async(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c16017f6e8db..14392c942f49 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN_V(1);
}
- skb_get(skb);
- rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
- skb_trim(skb, sizeof(*rpl5));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 72f08171a28a..bc3ec22a62c5 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -407,7 +407,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
to_erdma_access_flags(reg_wr(send_wr)->access);
regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
regmr_sge->length = cpu_to_le32(mr->ibmr.length);
- regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index a7a3d42e2016..699bd3f59cd3 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -280,7 +280,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
attr->vendor_part_id = dev->pdev->device;
attr->hw_ver = dev->pdev->revision;
- attr->max_qp = dev->attrs.max_qp;
+ attr->max_qp = dev->attrs.max_qp - 1;
attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
attr->max_qp_rd_atom = dev->attrs.max_ord;
attr->max_qp_init_rd_atom = dev->attrs.max_ird;
@@ -291,7 +291,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_send_sge = dev->attrs.max_send_sge;
attr->max_recv_sge = dev->attrs.max_recv_sge;
attr->max_sge_rd = dev->attrs.max_sge_rd;
- attr->max_cq = dev->attrs.max_cq;
+ attr->max_cq = dev->attrs.max_cq - 1;
attr->max_cqe = dev->attrs.max_cqe;
attr->max_mr = dev->attrs.max_mr;
attr->max_pd = dev->attrs.max_pd;
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index 707f1053f0b7..582b6f68df3d 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -26,14 +26,10 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
TP_PROTO(const char *function, struct va_format *vaf),
TP_ARGS(function, vaf),
TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("(%s) %s",
__get_str(function),
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f848eedc6a23..d24996526c4d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -730,7 +730,6 @@ struct hns_roce_caps {
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
- int num_qpc_timer;
u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cbdafaac678a..c780646bd60a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1977,7 +1977,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
@@ -2273,7 +2273,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
- caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f96debac30fe..64797109bab6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -36,11 +36,11 @@
#include <linux/bitops.h>
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
@@ -83,7 +83,7 @@
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c8af4ebd7cbd..4ccb217b2841 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -725,7 +725,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.num_qpc_timer, 1);
+ hr_dev->caps.qpc_timer_bt_num, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 48d3616a6d71..7bee7f6c5e70 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -462,11 +462,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
hr_qp->rq.rsv_sge);
- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
- hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index daeab5daed5b..a6e5d350a94c 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -1005,6 +1006,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
int ret_code;
bool move_cq_head = true;
u8 polarity;
+ u8 op_type;
bool ext_valid;
__le64 *ext_cqe;
@@ -1187,7 +1189,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
@@ -1204,6 +1205,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
break;
}
} while (1);
+ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index fdf4cc88cb91..075defaabee5 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
err_code = -EIO;
- if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
- cqp_request->compl_info.min_err_code == 0x8029) {
- if (!rf->reset) {
- rf->reset = true;
- rf->gen_ops.request_reset(rf);
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002)
+ err_code = -EBUSY;
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
}
}
}
@@ -2598,7 +2601,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwrcq);
+ irdma_comp_handler(iwqp->iwscq);
} else {
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b07b8af2997..9b207f5084eb 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_cq = rf->max_cq - rf->used_cqs;
- props->max_cqe = rf->max_cqe;
+ props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
- if (rdma_protocol_roce(ibdev, 1))
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ }
+
props->max_ah = rf->max_ah;
props->max_mcast_grp = rf->max_mcg;
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
@@ -3009,6 +3012,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
+ int status;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
@@ -3039,8 +3043,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
cqp_info->post_sq = 1;
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
irdma_free_stag(iwdev, iwmr->stag);
done:
if (iwpbl->pbl_allocated)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 293ed709e5ed..b4dc52392275 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a174a0eee8dc..883d7c60143e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
- dev->port_caps[port - 1].has_smi = false;
- if (MLX5_CAP_GEN(dev->mdev, port_type) ==
- MLX5_CAP_PORT_TYPE_IB) {
- if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port, 0,
- &vport_ctx);
- if (err) {
- mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
- port, err);
- return err;
- }
- dev->port_caps[port - 1].has_smi =
- vport_ctx.has_smi;
- } else {
- dev->port_caps[port - 1].has_smi = true;
- }
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
}
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
}
+
return 0;
}
@@ -4338,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2e2ad3918385..e66bf72f1f04 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -708,6 +708,7 @@ struct mlx5_ib_umr_context {
};
enum {
+ MLX5_UMR_STATE_UNINIT,
MLX5_UMR_STATE_ACTIVE,
MLX5_UMR_STATE_RECOVER,
MLX5_UMR_STATE_ERR,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index e00b94d1b1ea..d5105b5c9979 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -177,6 +177,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
sema_init(&dev->umrc.sem, MAX_UMR_WR);
mutex_init(&dev->umrc.lock);
+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
return 0;
@@ -191,6 +192,8 @@ destroy_pd:
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+ return;
ib_destroy_qp(dev->umrc.qp);
ib_free_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 1f4e60257700..7d47b521070b 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
- return virt_to_page(paddr);
+ return virt_to_page((void *)paddr);
return NULL;
}
@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
kunmap_local(kaddr);
}
} else {
- u64 va = sge->laddr + sge_off;
+ /*
+ * Cast to an uintptr_t to preserve all 64 bits
+ * in sge->laddr.
+ */
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
- page_array[seg] = virt_to_page(va & PAGE_MASK);
+ /*
+ * virt_to_page() takes a (void *) pointer,
+ * so cast to (void *); the pointer is 64
+ * bits wide on a 64-bit platform and 32
+ * bits wide on a 32-bit platform.
+ */
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(uintptr_t)va,
+ (void *)va,
plen);
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index bd5f3b5e1727..7b83f48f60c5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iscsi_hdr *hdr;
char *data;
int length;
+ bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- if (iser_conn->iscsi_conn->session->discovery_sess)
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
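
Note the two different flag tests in the full_feature_phase computation above: ISCSI_FULL_FEATURE_PHASE is a multi-bit mask, so the masked value must be compared for equality, while ISCSI_FLAG_CMD_FINAL is a single bit and a nonzero test suffices. The general pattern, sketched with hypothetical flag names:

static bool check_flags(u8 flags)
{
	/* Multi-bit field: every bit of the mask must be set. */
	bool in_state = (flags & STATE_MASK) == STATE_MASK;

	/* Single bit: an any-set test is enough. */
	bool is_final = flags & FLAG_FINAL;

	return in_state && is_final;
}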
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index baecde41d126..449904dac0a9 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1004,7 +1004,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, bool fr_en,
- u32 size, u32 imm, struct ib_send_wr *wr,
+ u32 count, u32 size, u32 imm,
+ struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -1024,12 +1025,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
num_sge = 2;
ptail = tail;
} else {
- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ for_each_sg(req->sglist, sg, count, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
- num_sge = 1 + req->sg_cnt;
+ num_sge = 1 + count;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
@@ -1142,7 +1143,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, WRITE);
- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
imm, wr, &inv_wr);
if (ret) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 34c03bde5064..4894e7329d88 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -595,7 +595,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
- int nr, chunks;
+ int nr, nr_sgt, chunks;
chunks = chunks_per_mr * mri;
if (!always_invalidate)
@@ -610,19 +610,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
- if (nr < sgt->nents) {
- err = nr < 0 ? nr : -EINVAL;
+ if (!nr_sgt) {
+ err = -EINVAL;
goto free_sg;
}
mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sgt->nents);
+ nr_sgt);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto unmap_sg;
}
- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
if (nr < 0 || nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
@@ -641,7 +641,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
}
}
/* Eventually dma addr for each chunk can be cached */
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ for_each_sg(sgt->sgl, s, nr_sgt, i)
srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
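
The map_cont_bufs() fix above accounts for ib_dma_map_sg() legitimately returning fewer entries than it was handed when the IOMMU coalesces segments: zero is the only failure, and every later consumer must use the returned count rather than sgt->nents. A condensed sketch of the corrected flow (the SZ_4K page size is illustrative):

static int map_chunks(struct ib_device *ibdev, struct ib_pd *pd,
		      struct sg_table *sgt, struct ib_mr **out_mr)
{
	struct ib_mr *mr;
	int nr_sgt, nr;

	nr_sgt = ib_dma_map_sg(ibdev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
	if (!nr_sgt)
		return -EINVAL;	/* zero mapped entries is the error case */

	/* Size the MR by the mapped count, which may be < sgt->nents. */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nr_sgt);
	if (IS_ERR(mr)) {
		ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
		return PTR_ERR(mr);
	}

	nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, NULL, SZ_4K);
	if (nr < nr_sgt) {
		ib_dereg_mr(mr);
		ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
		return nr < 0 ? nr : -EINVAL;
	}

	*out_mr = mr;
	return 0;
}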
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7720ea270ed8..d7f69e593a63 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1961,7 +1961,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
if (scmnd) {
req = scsi_cmd_priv(scmnd);
scmnd = srp_claim_req(ch, req, NULL, scmnd);
- } else {
+ }
+ if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
rsp->tag, ch - target->ch, ch->qp->qp_num);
diff --git a/drivers/input/input-core-private.h b/drivers/input/input-core-private.h
new file mode 100644
index 000000000000..116834cf8868
--- /dev/null
+++ b/drivers/input/input-core-private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _INPUT_CORE_PRIVATE_H
+#define _INPUT_CORE_PRIVATE_H
+
+/*
+ * Functions and definitions that are private to the input core and
+ * should not be used by input drivers or handlers.
+ */
+
+struct input_dev;
+
+void input_mt_release_slots(struct input_dev *dev);
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value);
+
+#endif /* _INPUT_CORE_PRIVATE_H */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 44fe6f2f063c..14b53dac1253 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -8,6 +8,7 @@
#include <linux/input/mt.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include "input-core-private.h"
#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
@@ -259,10 +260,13 @@ static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
{
int i;
+ lockdep_assert_held(&dev->event_lock);
+
for (i = 0; i < mt->num_slots; i++) {
- if (!input_mt_is_used(mt, &mt->slots[i])) {
- input_mt_slot(dev, i);
- input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+ if (input_mt_is_active(&mt->slots[i]) &&
+ !input_mt_is_used(mt, &mt->slots[i])) {
+ input_handle_event(dev, EV_ABS, ABS_MT_SLOT, i);
+ input_handle_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
}
}
}
@@ -278,13 +282,44 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
__input_mt_drop_unused(dev, mt);
mt->frame++;
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
/**
+ * input_mt_release_slots() - Deactivate all slots
+ * @dev: input device with allocated MT slots
+ *
+ * Lift all active slots.
+ */
+void input_mt_release_slots(struct input_dev *dev)
+{
+ struct input_mt *mt = dev->mt;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ if (mt) {
+ /* This will effectively mark all slots unused. */
+ mt->frame++;
+
+ __input_mt_drop_unused(dev, mt);
+
+ if (test_bit(ABS_PRESSURE, dev->absbit))
+ input_handle_event(dev, EV_ABS, ABS_PRESSURE, 0);
+
+ mt->frame++;
+ }
+}
+
+/**
* input_mt_sync_frame() - synchronize mt frame
* @dev: input device with allocated MT slots
*
@@ -300,8 +335,13 @@ void input_mt_sync_frame(struct input_dev *dev)
if (!mt)
return;
- if (mt->flags & INPUT_MT_DROP_UNUSED)
+ if (mt->flags & INPUT_MT_DROP_UNUSED) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
__input_mt_drop_unused(dev, mt);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
use_count = true;
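
The input-mt changes above follow one locking rule: helpers that emit events call input_handle_event() and assert dev->event_lock with lockdep, while public entry points take the lock themselves, as input_mt_drop_unused() now does. The shape of that split, sketched with hypothetical names:

static void __foo_emit(struct input_dev *dev)
{
	lockdep_assert_held(&dev->event_lock);

	/* Safe to emit events directly while the lock is held. */
	input_handle_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
}

void foo_entry(struct input_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	__foo_emit(dev);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}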
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1365c9dfb5f2..ebb2b7f0f8ff 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
+#include "input-core-private.h"
#include "input-poller.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
@@ -142,6 +143,8 @@ static void input_pass_values(struct input_dev *dev,
struct input_handle *handle;
struct input_value *v;
+ lockdep_assert_held(&dev->event_lock);
+
if (!count)
return;
@@ -174,44 +177,6 @@ static void input_pass_values(struct input_dev *dev,
}
}
-static void input_pass_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
-{
- struct input_value vals[] = { { type, code, value } };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-}
-
-/*
- * Generate software autorepeat event. Note that we take
- * dev->event_lock here to avoid racing with input_event
- * which may cause keys get "stuck".
- */
-static void input_repeat_key(struct timer_list *t)
-{
- struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (test_bit(dev->repeat_key, dev->key) &&
- is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- struct input_value vals[] = {
- { EV_KEY, dev->repeat_key, 2 },
- input_value_sync
- };
-
- input_set_timestamp(dev, ktime_get());
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-
- if (dev->rep[REP_PERIOD])
- mod_timer(&dev->timer, jiffies +
- msecs_to_jiffies(dev->rep[REP_PERIOD]));
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
#define INPUT_IGNORE_EVENT 0
#define INPUT_PASS_TO_HANDLERS 1
#define INPUT_PASS_TO_DEVICE 2
@@ -275,6 +240,10 @@ static int input_get_disposition(struct input_dev *dev,
int disposition = INPUT_IGNORE_EVENT;
int value = *pval;
+ /* filter-out events from inhibited devices */
+ if (dev->inhibited)
+ return INPUT_IGNORE_EVENT;
+
switch (type) {
case EV_SYN:
@@ -375,19 +344,9 @@ static int input_get_disposition(struct input_dev *dev,
return disposition;
}
-static void input_handle_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+static void input_event_dispose(struct input_dev *dev, int disposition,
+ unsigned int type, unsigned int code, int value)
{
- int disposition;
-
- /* filter-out events from inhibited devices */
- if (dev->inhibited)
- return;
-
- disposition = input_get_disposition(dev, type, code, &value);
- if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
- add_input_randomness(type, code, value);
-
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
@@ -426,7 +385,22 @@ static void input_handle_event(struct input_dev *dev,
input_pass_values(dev, dev->vals, dev->num_vals);
dev->num_vals = 0;
}
+}
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+{
+ int disposition;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ disposition = input_get_disposition(dev, type, code, &value);
+ if (disposition != INPUT_IGNORE_EVENT) {
+ if (type != EV_SYN)
+ add_input_randomness(type, code, value);
+
+ input_event_dispose(dev, disposition, type, code, value);
+ }
}
/**
@@ -613,7 +587,7 @@ static void __input_release_device(struct input_handle *handle)
lockdep_is_held(&dev->mutex));
if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
- /* Make sure input_pass_event() notices that grab is gone */
+ /* Make sure input_pass_values() notices that grab is gone */
synchronize_rcu();
list_for_each_entry(handle, &dev->h_list, d_node)
@@ -736,7 +710,7 @@ void input_close_device(struct input_handle *handle)
if (!--handle->open) {
/*
- * synchronize_rcu() makes sure that input_pass_event()
+ * synchronize_rcu() makes sure that input_pass_values()
* completed and that no more input events are delivered
* through this handle
*/
@@ -751,22 +725,21 @@ EXPORT_SYMBOL(input_close_device);
* Simulate keyup events for all keys that are marked as pressed.
* The function must be called with dev->event_lock held.
*/
-static void input_dev_release_keys(struct input_dev *dev)
+static bool input_dev_release_keys(struct input_dev *dev)
{
bool need_sync = false;
int code;
+ lockdep_assert_held(&dev->event_lock);
+
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
for_each_set_bit(code, dev->key, KEY_CNT) {
- input_pass_event(dev, EV_KEY, code, 0);
+ input_handle_event(dev, EV_KEY, code, 0);
need_sync = true;
}
-
- if (need_sync)
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
-
- memset(dev->key, 0, sizeof(dev->key));
}
+
+ return need_sync;
}
/*
@@ -793,7 +766,8 @@ static void input_disconnect_device(struct input_dev *dev)
* generate events even after we are done here but they will not
* reach any handlers.
*/
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
@@ -1004,12 +978,16 @@ int input_set_keycode(struct input_dev *dev,
} else if (test_bit(EV_KEY, dev->evbit) &&
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- struct input_value vals[] = {
- { EV_KEY, old_keycode, 0 },
- input_value_sync
- };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
+ /*
+ * We have to use input_event_dispose() here directly instead
+ * of input_handle_event() because the key we want to release
+ * here is considered no longer supported by the device and
+ * input_handle_event() will ignore it.
+ */
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
+ EV_KEY, old_keycode, 0);
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
+ EV_SYN, SYN_REPORT, 1);
}
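/*
 * Editor's note (sketch): on the normal path the disposition is computed
 * from dev->keybit by input_handle_event(), e.g.:
 *
 *	input_handle_event(dev, EV_KEY, old_keycode, 0);
 *
 * That would be ignored here because old_keycode has just been removed
 * from the keymap, hence the forced INPUT_PASS_TO_HANDLERS above.
 */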
out:
@@ -1784,7 +1762,8 @@ void input_reset_device(struct input_dev *dev)
spin_lock_irqsave(&dev->event_lock, flags);
input_dev_toggle(dev, true);
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
@@ -1806,7 +1785,9 @@ static int input_inhibit_device(struct input_dev *dev)
}
spin_lock_irq(&dev->event_lock);
+ input_mt_release_slots(dev);
input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
input_dev_toggle(dev, false);
spin_unlock_irq(&dev->event_lock);
@@ -1857,7 +1838,8 @@ static int input_dev_suspend(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
@@ -1891,7 +1873,8 @@ static int input_dev_freeze(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irq(&input_dev->event_lock);
@@ -2259,6 +2242,34 @@ static void devm_input_device_unregister(struct device *dev, void *res)
__input_unregister_device(input);
}
+/*
+ * Generate software autorepeat event. Note that we take
+ * dev->event_lock here to avoid racing with input_event
+ * which may cause keys to get "stuck".
+ */
+static void input_repeat_key(struct timer_list *t)
+{
+ struct input_dev *dev = from_timer(dev, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (!dev->inhibited &&
+ test_bit(dev->repeat_key, dev->key) &&
+ is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
+
+ input_set_timestamp(dev, ktime_get());
+ input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+
+ if (dev->rep[REP_PERIOD])
+ mod_timer(&dev->timer, jiffies +
+ msecs_to_jiffies(dev->rep[REP_PERIOD]));
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
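/*
 * Editor's sketch (values assumed): drivers can tune the software
 * autorepeat that this timer implements before registering the device;
 * both arguments are in milliseconds (here a 250 ms initial delay and a
 * 33 ms repeat period):
 *
 *	input_enable_softrepeat(dev, 250, 33);
 */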
+
/**
* input_enable_softrepeat - enable software autorepeat
* @dev: input device
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index 78ebca7d400a..e0cfdc84763f 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -222,13 +222,6 @@ static int adc_joystick_probe(struct platform_device *pdev)
if (error)
return error;
- input_set_drvdata(input, joy);
- error = input_register_device(input);
- if (error) {
- dev_err(dev, "Unable to register input device\n");
- return error;
- }
-
joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
if (IS_ERR(joy->buffer)) {
dev_err(dev, "Unable to allocate callback buffer\n");
@@ -241,6 +234,14 @@ static int adc_joystick_probe(struct platform_device *pdev)
return error;
}
+ input_set_drvdata(input, joy);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device\n");
+ return error;
+ }
+
return 0;
}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b2a68bc9f0b4..b86de1312512 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index f95a81b9fac7..2380546d7978 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
again:
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -64,7 +64,7 @@ again:
if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
goto again;
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
}
@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
iforce_serio->cmd_response_len = iforce_serio->len;
/* Signal that command is done */
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
} else if (likely(iforce->type)) {
iforce_process_packet(iforce, iforce_serio->id,
iforce_serio->data_in,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ea58805c480f..cba92bd590a8 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
spin_lock_irqsave(&iforce->xmit_lock, flags);
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
XMIT_INC(iforce->xmit.tail, n);
if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_warn(&iforce_usb->intf->dev,
"usb_submit_urb failed %d\n", n);
+ iforce_clear_xmit_and_wake(iforce);
}
/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = &iforce_usb->iforce;
if (urb->status) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
urb->status);
+ iforce_clear_xmit_and_wake(iforce);
return;
}
__iforce_usb_xmit(iforce);
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
}
static int iforce_usb_probe(struct usb_interface *intf,
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 6aa761ebbdf7..9ccb9107ccbe 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
response_data, response_len);
}
+static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
+{
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ wake_up_all(&iforce->wait);
+}
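/*
 * Editor's sketch (assumed waiter, not part of this patch): with the
 * wakeup folded into the helper, a sender can sleep until the transmit
 * ring drains:
 *
 *	wait_event_interruptible(iforce->wait,
 *		!test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));
 */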
+
/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
diff --git a/drivers/input/joystick/sensehat-joystick.c b/drivers/input/joystick/sensehat-joystick.c
index 5ad1fe4ff496..a84df39d3b2f 100644
--- a/drivers/input/joystick/sensehat-joystick.c
+++ b/drivers/input/joystick/sensehat-joystick.c
@@ -98,10 +98,8 @@ static int sensehat_joystick_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Could not retrieve interrupt request");
+ if (irq < 0)
return irq;
- }
error = devm_request_threaded_irq(&pdev->dev, irq,
NULL, sensehat_joystick_report,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4ea79db8f134..a20ee693b22b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -795,7 +795,7 @@ config KEYBOARD_MT6779
config KEYBOARD_MTK_PMIC
tristate "MediaTek PMIC keys support"
- depends on MFD_MT6397
+ depends on MFD_MT6397 || COMPILE_TEST
help
Say Y here if you want to use the pmic keys (powerkey/homekey).
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1592da4de336..1a1a05d7cd42 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -8,17 +8,19 @@
* Copyright (C) 2008-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/gpio/driver.h>
+#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/platform_data/adp5588.h>
@@ -36,18 +38,18 @@
* asserted.
*/
#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
+#define WA_DELAYED_READOUT_TIME 25
struct adp5588_kpad {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
+ ktime_t irq_time;
unsigned long delay;
unsigned short keycode[ADP5588_KEYMAPSIZE];
const struct adp5588_gpi_map *gpimap;
unsigned short gpimapsize;
#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5588_MAXGPIO];
- bool export_gpio;
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
u8 dat_out[3];
@@ -179,6 +181,21 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
+static void adp5588_gpio_do_teardown(void *_kpad)
+{
+ struct adp5588_kpad *kpad = _kpad;
+ struct device *dev = &kpad->client->dev;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
+ const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ int error;
+
+ error = gpio_data->teardown(kpad->client,
+ kpad->gc.base, kpad->gc.ngpio,
+ gpio_data->context);
+ if (error)
+		dev_warn(&kpad->client->dev, "teardown failed: %d\n", error);
+}
+
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
@@ -195,8 +212,6 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
- kpad->export_gpio = true;
-
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
@@ -210,9 +225,9 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
mutex_init(&kpad->gpio_lock);
- error = gpiochip_add_data(&kpad->gc, kpad);
+ error = devm_gpiochip_add_data(dev, &kpad->gc, kpad);
if (error) {
- dev_err(dev, "gpiochip_add failed, err: %d\n", error);
+ dev_err(dev, "gpiochip_add failed: %d\n", error);
return error;
}
@@ -227,41 +242,24 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.base, kpad->gc.ngpio,
gpio_data->context);
if (error)
- dev_warn(dev, "setup failed, %d\n", error);
+ dev_warn(dev, "setup failed: %d\n", error);
}
- return 0;
-}
-
-static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
- struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
- int error;
-
- if (!kpad->export_gpio)
- return;
-
if (gpio_data->teardown) {
- error = gpio_data->teardown(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
+ error = devm_add_action(dev, adp5588_gpio_do_teardown, kpad);
if (error)
- dev_warn(dev, "teardown failed %d\n", error);
+ dev_warn(dev, "failed to schedule teardown: %d\n",
+ error);
}
- gpiochip_remove(&kpad->gc);
+ return 0;
}
+
#else
static inline int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
return 0;
}
-
-static inline void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
-}
#endif
static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
@@ -289,13 +287,36 @@ static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
}
}
-static void adp5588_work(struct work_struct *work)
+static irqreturn_t adp5588_hard_irq(int irq, void *handle)
{
- struct adp5588_kpad *kpad = container_of(work,
- struct adp5588_kpad, work.work);
+ struct adp5588_kpad *kpad = handle;
+
+ kpad->irq_time = ktime_get();
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t adp5588_thread_irq(int irq, void *handle)
+{
+ struct adp5588_kpad *kpad = handle;
struct i2c_client *client = kpad->client;
+ ktime_t target_time, now;
+ unsigned long delay;
int status, ev_cnt;
+ /*
+ * Readout needs to wait for at least 25ms after the notification
+ * for REVID < 4.
+ */
+ if (kpad->delay) {
+ target_time = ktime_add_ms(kpad->irq_time, kpad->delay);
+ now = ktime_get();
+ if (ktime_before(now, target_time)) {
+ delay = ktime_to_us(ktime_sub(target_time, now));
+ usleep_range(delay, delay + 1000);
+ }
+ }
+
status = adp5588_read(client, INT_STAT);
if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
@@ -308,20 +329,8 @@ static void adp5588_work(struct work_struct *work)
input_sync(kpad->input);
}
}
- adp5588_write(client, INT_STAT, status); /* Status is W1C */
-}
-
-static irqreturn_t adp5588_irq(int irq, void *handle)
-{
- struct adp5588_kpad *kpad = handle;
- /*
- * use keventd context to read the event fifo registers
- * Schedule readout at least 25ms after notification for
- * REVID < 4
- */
-
- schedule_delayed_work(&kpad->work, kpad->delay);
+ adp5588_write(client, INT_STAT, status); /* Status is W1C */
return IRQ_HANDLED;
}
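/*
 * Worked example (editor's note): if the hard IRQ handler stamped
 * irq_time and this thread runs 10 ms later with a 25 ms required delay,
 * then target_time = irq_time + 25 ms and now = irq_time + 10 ms, so the
 * thread sleeps roughly the remaining 15000 us via usleep_range() before
 * reading INT_STAT.
 */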
@@ -496,30 +505,27 @@ static int adp5588_probe(struct i2c_client *client,
return -EINVAL;
}
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
- input = input_allocate_device();
- if (!kpad || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
kpad->client = client;
kpad->input = input;
- INIT_DELAYED_WORK(&kpad->work, adp5588_work);
ret = adp5588_read(client, DEV_ID);
- if (ret < 0) {
- error = ret;
- goto err_free_mem;
- }
+ if (ret < 0)
+ return ret;
revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
if (WA_DELAYED_READOUT_REVID(revid))
- kpad->delay = msecs_to_jiffies(30);
+		kpad->delay = WA_DELAYED_READOUT_TIME;	/* in ms; consumed via ktime_add_ms() */
input->name = client->name;
input->phys = "adp5588-keys/input0";
- input->dev.parent = &client->dev;
input_set_drvdata(input, kpad);
@@ -556,95 +562,63 @@ static int adp5588_probe(struct i2c_client *client,
error = input_register_device(input);
if (error) {
- dev_err(&client->dev, "unable to register input device\n");
- goto err_free_mem;
+ dev_err(&client->dev, "unable to register input device: %d\n",
+ error);
+ return error;
}
- error = request_irq(client->irq, adp5588_irq,
- IRQF_TRIGGER_FALLING,
- client->dev.driver->name, kpad);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ adp5588_hard_irq, adp5588_thread_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, kpad);
if (error) {
- dev_err(&client->dev, "irq %d busy?\n", client->irq);
- goto err_unreg_dev;
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
}
error = adp5588_setup(client);
if (error)
- goto err_free_irq;
+ return error;
if (kpad->gpimapsize)
adp5588_report_switch_state(kpad);
error = adp5588_gpio_add(kpad);
if (error)
- goto err_free_irq;
-
- device_init_wakeup(&client->dev, 1);
- i2c_set_clientdata(client, kpad);
+ return error;
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
-
- err_free_irq:
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- err_unreg_dev:
- input_unregister_device(input);
- input = NULL;
- err_free_mem:
- input_free_device(input);
- kfree(kpad);
-
- return error;
}
static int adp5588_remove(struct i2c_client *client)
{
- struct adp5588_kpad *kpad = i2c_get_clientdata(client);
-
adp5588_write(client, CFG, 0);
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- input_unregister_device(kpad->input);
- adp5588_gpio_remove(kpad);
- kfree(kpad);
+ /* all resources will be freed by devm */
return 0;
}
-#ifdef CONFIG_PM
-static int adp5588_suspend(struct device *dev)
+static int __maybe_unused adp5588_suspend(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
+ struct i2c_client *client = to_i2c_client(dev);
disable_irq(client->irq);
- cancel_delayed_work_sync(&kpad->work);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
return 0;
}
-static int adp5588_resume(struct device *dev)
+static int __maybe_unused adp5588_resume(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
+ struct i2c_client *client = to_i2c_client(dev);
enable_irq(client->irq);
return 0;
}
-static const struct dev_pm_ops adp5588_dev_pm_ops = {
- .suspend = adp5588_suspend,
- .resume = adp5588_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
static const struct i2c_device_id adp5588_id[] = {
{ "adp5588-keys", 0 },
@@ -656,9 +630,7 @@ MODULE_DEVICE_TABLE(i2c, adp5588_id);
static struct i2c_driver adp5588_driver = {
.driver = {
.name = KBUILD_MODNAME,
-#ifdef CONFIG_PM
.pm = &adp5588_dev_pm_ops,
-#endif
},
.probe = adp5588_probe,
.remove = adp5588_remove,
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index cc73a149da28..c14136b733a9 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -12,6 +12,7 @@
// expensive.
#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/input.h>
@@ -518,6 +519,50 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev,
return 0;
}
+static void cros_ec_keyb_parse_vivaldi_physmap(struct cros_ec_keyb *ckdev)
+{
+ u32 *physmap = ckdev->vdata.function_row_physmap;
+ unsigned int row, col, scancode;
+ int n_physmap;
+ int error;
+ int i;
+
+ n_physmap = device_property_count_u32(ckdev->dev,
+ "function-row-physmap");
+ if (n_physmap <= 0)
+ return;
+
+ if (n_physmap >= VIVALDI_MAX_FUNCTION_ROW_KEYS) {
+ dev_warn(ckdev->dev,
+			 "only up to %d top row keys are supported (%d specified)\n",
+ VIVALDI_MAX_FUNCTION_ROW_KEYS, n_physmap);
+ n_physmap = VIVALDI_MAX_FUNCTION_ROW_KEYS;
+ }
+
+ error = device_property_read_u32_array(ckdev->dev,
+ "function-row-physmap",
+ physmap, n_physmap);
+ if (error) {
+ dev_warn(ckdev->dev,
+ "failed to parse function-row-physmap property: %d\n",
+ error);
+ return;
+ }
+
+ /*
+ * Convert (in place) from row/column encoding to matrix "scancode"
+ * used by the driver.
+ */
+ for (i = 0; i < n_physmap; i++) {
+ row = KEY_ROW(physmap[i]);
+ col = KEY_COL(physmap[i]);
+ scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap[i] = scancode;
+ }
+
+ ckdev->vdata.num_function_row_keys = n_physmap;
+}
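/*
 * Worked example (editor's note, values assumed): MATRIX_SCAN_CODE()
 * expands to ((row) << (row_shift)) + (col), so with row_shift = 3 a
 * physmap entry encoding row 1, column 2 is rewritten in place to
 * (1 << 3) + 2 = 10.
 */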
+
/**
* cros_ec_keyb_register_matrix - Register matrix keys
*
@@ -534,11 +579,6 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
struct input_dev *idev;
const char *phys;
int err;
- struct property *prop;
- const __be32 *p;
- u32 *physmap;
- u32 key_pos;
- unsigned int row, col, scancode, n_physmap;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -573,7 +613,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
idev->id.product = 0;
idev->dev.parent = dev;
- ckdev->ghost_filter = of_property_read_bool(dev->of_node,
+ ckdev->ghost_filter = device_property_read_bool(dev,
"google,needs-ghost-filter");
err = matrix_keypad_build_keymap(NULL, NULL, ckdev->rows, ckdev->cols,
@@ -589,22 +629,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
input_set_drvdata(idev, ckdev);
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
-
- physmap = ckdev->vdata.function_row_physmap;
- n_physmap = 0;
- of_property_for_each_u32(dev->of_node, "function-row-physmap",
- prop, p, key_pos) {
- if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
- dev_warn(dev, "Only support up to %d top row keys\n",
- VIVALDI_MAX_FUNCTION_ROW_KEYS);
- break;
- }
- row = KEY_ROW(key_pos);
- col = KEY_COL(key_pos);
- scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
- physmap[n_physmap++] = scancode;
- }
- ckdev->vdata.num_function_row_keys = n_physmap;
+ cros_ec_keyb_parse_vivaldi_physmap(ckdev);
err = input_register_device(ckdev->idev);
if (err) {
@@ -653,14 +678,19 @@ static const struct attribute_group cros_ec_keyb_attr_group = {
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
- struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *ec;
struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
bool buttons_switches_only = device_get_match_data(dev);
int err;
- if (!dev->of_node)
- return -ENODEV;
+ /*
+ * If the parent ec device has not been probed yet, defer the probe of
+ * this keyboard/button driver until later.
+ */
+ ec = dev_get_drvdata(pdev->dev.parent);
+ if (!ec)
+ return -EPROBE_DEFER;
ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
@@ -713,6 +743,14 @@ static int cros_ec_keyb_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ec_keyb_acpi_match[] = {
+ { "GOOG0007", true },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_keyb_acpi_match);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_keyb_of_match[] = {
{ .compatible = "google,cros-ec-keyb" },
@@ -730,6 +768,7 @@ static struct platform_driver cros_ec_keyb_driver = {
.driver = {
.name = "cros-ec-keyb",
.of_match_table = of_match_ptr(cros_ec_keyb_of_match),
+ .acpi_match_table = ACPI_PTR(cros_ec_keyb_acpi_match),
.pm = &cros_ec_keyb_pm_ops,
},
};
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 2e7c9187c10f..bf447bf598fb 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -17,6 +17,11 @@
#define MTK_KPD_DEBOUNCE 0x0018
#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
#define MTK_KPD_DEBOUNCE_MAX_MS 256
+#define MTK_KPD_SEL 0x0020
+#define MTK_KPD_SEL_COL GENMASK(15, 10)
+#define MTK_KPD_SEL_ROW GENMASK(9, 4)
+#define MTK_KPD_SEL_COLMASK(c) GENMASK((c) + 9, 10)
+#define MTK_KPD_SEL_ROWMASK(r) GENMASK((r) + 3, 4)
#define MTK_KPD_NUM_MEMS 5
#define MTK_KPD_NUM_BITS 136 /* 4*32+8 MEM5 only use 8 BITS */
@@ -42,7 +47,7 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
const unsigned short *keycode = keypad->input_dev->keycode;
DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
- unsigned int bit_nr;
+ unsigned int bit_nr, key;
unsigned int row, col;
unsigned int scancode;
unsigned int row_shift = get_count_order(keypad->n_cols);
@@ -61,8 +66,10 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
if (bit_nr % 32 >= 16)
continue;
- row = bit_nr / 32;
- col = bit_nr % 32;
+ key = bit_nr / 32 * 16 + bit_nr % 32;
+ row = key / 9;
+ col = key % 9;
+
scancode = MATRIX_SCAN_CODE(row, col, row_shift);
/* 1: not pressed, 0: pressed */
pressed = !test_bit(bit_nr, new_state);
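/*
 * Worked example (editor's note): bit_nr = 40 sits in the second 32-bit
 * register (40 / 32 == 1) at offset 8 (40 % 32), so key = 1 * 16 + 8 = 24,
 * which decodes to row = 24 / 9 = 2 and col = 24 % 9 = 6.
 */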
@@ -159,6 +166,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
(debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_ROW,
+ MTK_KPD_SEL_ROWMASK(keypad->n_rows));
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
+ MTK_KPD_SEL_COLMASK(keypad->n_cols));
+
keypad->clk = devm_clk_get(&pdev->dev, "kpd");
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index c31ab4368388..6404081253ea 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -18,17 +18,9 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define MTK_PMIC_PWRKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_PWRKEY_RST_EN_SHIFT 6
-#define MTK_PMIC_HOMEKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_HOMEKEY_RST_EN_SHIFT 5
-#define MTK_PMIC_RST_DU_MASK 0x3
-#define MTK_PMIC_RST_DU_SHIFT 8
-
-#define MTK_PMIC_PWRKEY_RST \
- (MTK_PMIC_PWRKEY_RST_EN_MASK << MTK_PMIC_PWRKEY_RST_EN_SHIFT)
-#define MTK_PMIC_HOMEKEY_RST \
- (MTK_PMIC_HOMEKEY_RST_EN_MASK << MTK_PMIC_HOMEKEY_RST_EN_SHIFT)
+#define MTK_PMIC_RST_DU_MASK GENMASK(9, 8)
+#define MTK_PMIC_PWRKEY_RST BIT(6)
+#define MTK_PMIC_HOMEKEY_RST BIT(5)
#define MTK_PMIC_PWRKEY_INDEX 0
#define MTK_PMIC_HOMEKEY_INDEX 1
@@ -39,50 +31,58 @@ struct mtk_pmic_keys_regs {
u32 deb_mask;
u32 intsel_reg;
u32 intsel_mask;
+ u32 rst_en_mask;
};
#define MTK_PMIC_KEYS_REGS(_deb_reg, _deb_mask, \
- _intsel_reg, _intsel_mask) \
+ _intsel_reg, _intsel_mask, _rst_mask) \
{ \
.deb_reg = _deb_reg, \
.deb_mask = _deb_mask, \
.intsel_reg = _intsel_reg, \
.intsel_mask = _intsel_mask, \
+ .rst_en_mask = _rst_mask, \
}
struct mtk_pmic_regs {
const struct mtk_pmic_keys_regs keys_regs[MTK_PMIC_MAX_KEY_COUNT];
u32 pmic_rst_reg;
+ u32 rst_lprst_mask; /* Long-press reset timeout bitmask */
};
static const struct mtk_pmic_regs mt6397_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_CHRSTATUS,
- 0x8, MT6397_INT_RSV, 0x10),
+ 0x8, MT6397_INT_RSV, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_OCSTATUS2,
- 0x10, MT6397_INT_RSV, 0x8),
+ 0x10, MT6397_INT_RSV, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6397_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6323_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x2, MT6323_INT_MISC_CON, 0x10),
+ 0x2, MT6323_INT_MISC_CON, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x4, MT6323_INT_MISC_CON, 0x8),
+ 0x4, MT6323_INT_MISC_CON, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6323_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+ 0x2, MT6358_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+ 0x8, MT6358_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6358_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
struct mtk_pmic_keys_info {
@@ -108,53 +108,49 @@ enum mtk_pmic_keys_lp_mode {
};
static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
- u32 pmic_rst_reg)
+ const struct mtk_pmic_regs *regs)
{
- int ret;
+ const struct mtk_pmic_keys_regs *kregs_home, *kregs_pwr;
u32 long_press_mode, long_press_debounce;
+ u32 value, mask;
+ int error;
+
+ kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
+ kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
- ret = of_property_read_u32(keys->dev->of_node,
- "power-off-time-sec", &long_press_debounce);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
+ &long_press_debounce);
+ if (error)
long_press_debounce = 0;
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_RST_DU_MASK << MTK_PMIC_RST_DU_SHIFT,
- long_press_debounce << MTK_PMIC_RST_DU_SHIFT);
+ mask = regs->rst_lprst_mask;
+ value = long_press_debounce << (ffs(regs->rst_lprst_mask) - 1);
- ret = of_property_read_u32(keys->dev->of_node,
- "mediatek,long-press-mode", &long_press_mode);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node,
+ "mediatek,long-press-mode",
+ &long_press_mode);
+ if (error)
long_press_mode = LP_DISABLE;
switch (long_press_mode) {
- case LP_ONEKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
- break;
case LP_TWOKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- MTK_PMIC_HOMEKEY_RST);
- break;
+ value |= kregs_home->rst_en_mask;
+ fallthrough;
+
+ case LP_ONEKEY:
+ value |= kregs_pwr->rst_en_mask;
+ fallthrough;
+
case LP_DISABLE:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- 0);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
+ mask |= kregs_home->rst_en_mask;
+ mask |= kregs_pwr->rst_en_mask;
break;
+
default:
break;
}
+
+ regmap_update_bits(keys->regmap, regs->pmic_rst_reg, mask, value);
}
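/*
 * Editor's note: the fallthrough chain accumulates enable bits into
 * "value" (LP_TWOKEY sets both keys, LP_ONEKEY only the power key) and
 * every mode reaches LP_DISABLE, which widens "mask" to cover both
 * enable bits, so the single regmap_update_bits() call sets the
 * requested bits and clears the rest.
 */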
static irqreturn_t mtk_pmic_keys_irq_handler_thread(int irq, void *data)
@@ -358,7 +354,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
return error;
}
- mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs->pmic_rst_reg);
+ mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs);
platform_set_drvdata(pdev, keys);
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 8a7ce41b8c56..ee9d04a3f0d5 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -179,11 +179,9 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
int error;
u64 keys;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return IRQ_NONE;
- }
low = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
high = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
@@ -207,11 +205,9 @@ static int omap4_keypad_open(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return error;
- }
disable_irq(keypad_data->irq);
@@ -254,9 +250,10 @@ static void omap4_keypad_close(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0)
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
+ dev_err(dev, "%s: pm_runtime_resume_and_get() failed: %d\n",
+ __func__, error);
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
@@ -392,10 +389,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error) {
+ dev_err(dev, "pm_runtime_resume_and_get() failed\n");
return error;
}
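/*
 * Editor's note: pm_runtime_resume_and_get() returns 0 on success and
 * already drops the usage count itself on failure, which is why the
 * error paths above no longer call pm_runtime_put_noidle().
 */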
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 6b4138771a3f..b2e8097a2e6d 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -40,7 +40,6 @@
#define IQS7222_SLDR_SETUP_2_RES_MASK GENMASK(15, 8)
#define IQS7222_SLDR_SETUP_2_RES_SHIFT 8
#define IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK GENMASK(7, 0)
-#define IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK GENMASK(9, 0)
#define IQS7222_GPIO_SETUP_0_GPIO_EN BIT(0)
@@ -54,6 +53,9 @@
#define IQS7222_SYS_SETUP_ACK_RESET BIT(0)
#define IQS7222_EVENT_MASK_ATI BIT(12)
+#define IQS7222_EVENT_MASK_SLDR BIT(10)
+#define IQS7222_EVENT_MASK_TOUCH BIT(1)
+#define IQS7222_EVENT_MASK_PROX BIT(0)
#define IQS7222_COMMS_HOLD BIT(0)
#define IQS7222_COMMS_ERROR 0xEEEE
@@ -92,11 +94,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_GPIO,
IQS7222_REG_GRP_SYS,
@@ -135,12 +137,12 @@ struct iqs7222_event_desc {
static const struct iqs7222_event_desc iqs7222_kp_events[] = {
{
.name = "event-prox",
- .enable = BIT(0),
+ .enable = IQS7222_EVENT_MASK_PROX,
.reg_key = IQS7222_REG_KEY_PROX,
},
{
.name = "event-touch",
- .enable = BIT(1),
+ .enable = IQS7222_EVENT_MASK_TOUCH,
.reg_key = IQS7222_REG_KEY_TOUCH,
},
};
@@ -556,13 +558,6 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
.label = "current reference trim",
},
{
- .name = "azoteq,rf-filt-enable",
- .reg_grp = IQS7222_REG_GRP_GLBL,
- .reg_offset = 0,
- .reg_shift = 15,
- .reg_width = 1,
- },
- {
.name = "azoteq,max-counts",
.reg_grp = IQS7222_REG_GRP_GLBL,
.reg_offset = 0,
@@ -1272,9 +1267,22 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
struct i2c_client *client = iqs7222->client;
ktime_t ati_timeout;
u16 sys_status = 0;
- u16 sys_setup = iqs7222->sys_setup[0] & ~IQS7222_SYS_SETUP_ACK_RESET;
+ u16 sys_setup;
int error, i;
+ /*
+ * The reserved fields of the system setup register may have changed
+ * as a result of other registers having been written. As such, read
+ * the register's latest value to avoid unexpected behavior when the
+ * register is written in the loop that follows.
+ */
+ error = iqs7222_read_word(iqs7222, IQS7222_SYS_SETUP, &sys_setup);
+ if (error)
+ return error;
+
+ sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1299,12 +1307,15 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;
- if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
- continue;
+ if (sys_status & IQS7222_SYS_STATUS_RESET)
+ return 0;
if (sys_status & IQS7222_SYS_STATUS_ATI_ERROR)
break;
+ if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
+ continue;
+
/*
* Use stream-in-touch mode if either slider reports
* absolute position.
@@ -1321,7 +1332,7 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
dev_err(&client->dev,
"ATI attempt %d of %d failed with status 0x%02X, %s\n",
i + 1, IQS7222_NUM_RETRIES, (u8)sys_status,
- i < IQS7222_NUM_RETRIES ? "retrying..." : "stopping");
+ i + 1 < IQS7222_NUM_RETRIES ? "retrying" : "stopping");
}
return -ETIMEDOUT;
@@ -1334,6 +1345,34 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int error, i, j, k;
/*
+ * Acknowledge reset before writing any registers in case the device
+ * suffers a spurious reset during initialization. Because this step
+ * may change the reserved fields of the second filter beta register,
+ * its cache must be updated.
+ *
+ * Writing the second filter beta register, in turn, may clobber the
+ * system status register. As such, the filter beta register pair is
+ * written first to protect against this hazard.
+ */
+ if (dir == WRITE) {
+ u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
+ u16 filt_setup;
+
+ error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ iqs7222->sys_setup[0] |
+ IQS7222_SYS_SETUP_ACK_RESET);
+ if (error)
+ return error;
+
+ error = iqs7222_read_word(iqs7222, reg, &filt_setup);
+ if (error)
+ return error;
+
+ iqs7222->filt_setup[1] &= GENMASK(7, 0);
+ iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
+ }
+
+ /*
* Take advantage of the stop-bit disable function, if available, to
* save the trouble of having to reopen a communication window after
* each burst read or write.
@@ -1957,8 +1996,8 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
int ext_chan = rounddown(num_chan, 10);
int count, error, reg_offset, i;
+ u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
- u16 *sys_setup = iqs7222->sys_setup;
unsigned int chan_sel[4], val;
error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
@@ -2003,7 +2042,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
reg_offset = dev_desc->sldr_res < U16_MAX ? 0 : 1;
sldr_setup[0] |= count;
- sldr_setup[3 + reg_offset] &= ~IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK;
+ sldr_setup[3 + reg_offset] &= ~GENMASK(ext_chan - 1, 0);
for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
sldr_setup[5 + reg_offset + i] = 0;
@@ -2081,17 +2120,19 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
sldr_setup[0] |= dev_desc->wheel_enable;
}
+ /*
+ * The absence of a register offset makes it safe to assume the device
+ * supports gestures, each of which is first disabled until explicitly
+ * enabled.
+ */
+ if (!reg_offset)
+ for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++)
+ sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
+
for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
const char *event_name = iqs7222_sl_events[i].name;
struct fwnode_handle *event_node;
- /*
- * The absence of a register offset means the remaining fields
- * in the group represent gesture settings.
- */
- if (iqs7222_sl_events[i].enable && !reg_offset)
- sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
-
event_node = fwnode_get_named_child_node(sldr_node, event_name);
if (!event_node)
continue;
@@ -2104,6 +2145,22 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
if (error)
return error;
+ /*
+ * The press/release event does not expose a direct GPIO link,
+ * but one can be emulated by tying each of the participating
+ * channels to the same GPIO.
+ */
+ error = iqs7222_gpio_select(iqs7222, event_node,
+ i ? iqs7222_sl_events[i].enable
+ : sldr_setup[3 + reg_offset],
+ i ? 1568 + sldr_index * 30
+ : sldr_setup[4 + reg_offset]);
+ if (error)
+ return error;
+
+ if (!reg_offset)
+ sldr_setup[9] |= iqs7222_sl_events[i].enable;
+
error = fwnode_property_read_u32(event_node, "linux,code",
&val);
if (error) {
@@ -2115,26 +2172,20 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
iqs7222->sl_code[sldr_index][i] = val;
input_set_capability(iqs7222->keypad, EV_KEY, val);
- /*
- * The press/release event is determined based on whether the
- * coordinate field reports 0xFFFF and has no explicit enable
- * control.
- */
- if (!iqs7222_sl_events[i].enable || reg_offset)
- continue;
-
- sldr_setup[9] |= iqs7222_sl_events[i].enable;
-
- error = iqs7222_gpio_select(iqs7222, event_node,
- iqs7222_sl_events[i].enable,
- 1568 + sldr_index * 30);
- if (error)
- return error;
-
if (!dev_desc->event_offset)
continue;
- sys_setup[dev_desc->event_offset] |= BIT(10 + sldr_index);
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate field reports 0xFFFF and solely relies on touch
+ * or proximity interrupts to be unmasked.
+ */
+ if (i && !reg_offset)
+ *event_mask |= (IQS7222_EVENT_MASK_SLDR << sldr_index);
+ else if (sldr_setup[4 + reg_offset] == dev_desc->touch_link)
+ *event_mask |= IQS7222_EVENT_MASK_TOUCH;
+ else
+ *event_mask |= IQS7222_EVENT_MASK_PROX;
}
/*
@@ -2227,11 +2278,6 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
return error;
}
- sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
- sys_setup[0] |= IQS7222_SYS_SETUP_ACK_RESET;
-
return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
IQS7222_REG_KEY_NONE);
}
@@ -2299,29 +2345,37 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
input_report_abs(iqs7222->keypad, iqs7222->sl_axis[i],
sldr_pos);
- for (j = 0; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
- u16 mask = iqs7222_sl_events[j].mask;
- u16 val = iqs7222_sl_events[j].val;
+ input_report_key(iqs7222->keypad, iqs7222->sl_code[i][0],
+ sldr_pos < dev_desc->sldr_res);
- if (!iqs7222_sl_events[j].enable) {
- input_report_key(iqs7222->keypad,
- iqs7222->sl_code[i][j],
- sldr_pos < dev_desc->sldr_res);
- continue;
- }
+ /*
+ * A maximum resolution indicates the device does not support
+ * gestures, in which case the remaining fields are ignored.
+ */
+ if (dev_desc->sldr_res == U16_MAX)
+ continue;
- /*
- * The remaining offsets represent gesture state, and
- * are discarded in the case of IQS7222C because only
- * absolute position is reported.
- */
- if (num_stat < IQS7222_MAX_COLS_STAT)
- continue;
+ if (!(le16_to_cpu(status[1]) & IQS7222_EVENT_MASK_SLDR << i))
+ continue;
+
+ /*
+ * Skip the press/release event, as it does not have separate
+ * status fields and is handled separately.
+ */
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
+ u16 mask = iqs7222_sl_events[j].mask;
+ u16 val = iqs7222_sl_events[j].val;
input_report_key(iqs7222->keypad,
iqs7222->sl_code[i][j],
(state & mask) == val);
}
+
+ input_sync(iqs7222->keypad);
+
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++)
+ input_report_key(iqs7222->keypad,
+ iqs7222->sl_code[i][j], 0);
}
input_sync(iqs7222->keypad);
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
index 3fb64dbda1a2..76873aa005b4 100644
--- a/drivers/input/misc/rk805-pwrkey.c
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
};
module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 812edfced86e..0caaf3e64215 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -57,7 +57,7 @@ struct pip_app_resp_head {
* The value of data_status can be the first byte of data or
* the command status or the unsupported command code depending on the
* requested command code.
- */
+ */
u8 data_status;
} __packed;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 23507fce3a2b..18ccbd45004a 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -41,7 +41,7 @@ struct gpio_mouse {
/*
* Timer function which is run every scan_ms ms when the device is opened.
- * The dev input variable is set to the the input_dev pointer.
+ * The dev input variable is set to the input_dev pointer.
*/
static void gpio_mouse_scan(struct input_dev *input)
{
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index a9065c6ab550..da2c67cb8642 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -350,6 +350,10 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
+ if (!ps2port->addr) {
+ ret = -ENOMEM;
+ goto fail_nomem;
+ }
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 148a7c5fd0e2..4fbec7bbecca 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -67,25 +67,84 @@ static inline void i8042_write_command(int val)
#include <linux/dmi.h>
-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+#define SERIO_QUIRK_NOKBD BIT(0)
+#define SERIO_QUIRK_NOAUX BIT(1)
+#define SERIO_QUIRK_NOMUX BIT(2)
+#define SERIO_QUIRK_FORCEMUX BIT(3)
+#define SERIO_QUIRK_UNLOCK BIT(4)
+#define SERIO_QUIRK_PROBE_DEFER BIT(5)
+#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
+#define SERIO_QUIRK_RESET_NEVER BIT(7)
+#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DUMBKBD BIT(9)
+#define SERIO_QUIRK_NOLOOP BIT(10)
+#define SERIO_QUIRK_NOTIMEOUT BIT(11)
+#define SERIO_QUIRK_KBDRESET BIT(12)
+#define SERIO_QUIRK_DRITEK BIT(13)
+#define SERIO_QUIRK_NOPNP BIT(14)
+
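/*
 * Editor's sketch (assumed consumer, not shown in this hunk): probe code
 * can decode the matched entry's flags in one place, e.g.:
 *
 *	const struct dmi_system_id *m = dmi_first_match(i8042_dmi_quirk_table);
 *	unsigned long quirks = m ? (unsigned long)m->driver_data : 0;
 *
 *	if (quirks & SERIO_QUIRK_NOMUX)
 *		i8042_nomux = true;
 */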
+/* Quirk table for different mainboards. Options similar or identical to i8042
+ * module parameters.
+ * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored.
+ * This allows entries to overwrite vendor-wide quirks on a per-device basis.
+ * Where this is irrelevant, entries are sorted case-sensitively by DMI_SYS_VENDOR
+ * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries.
+ */
+static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
{
- /*
- * Arima-Rioworks HDAMB -
- * AUX LOOP command does not raise AUX IRQ
- */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
- DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
- DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "G1S"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ /* Asus X450LCP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
},
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ /*
+	 * On some Asus laptops, just running self-tests causes problems.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
},
{
/* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
@@ -94,585 +153,689 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
+ /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "G1S"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Acer Aspire 5710 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Embedded Box PC 3000 */
+ /* Acer Aspire 7738 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* OQO Model 01 */
+ /* Acer Aspire 5536 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ULI EV4873 - AUX LOOP does not work properly */
+ /*
+ * Acer Aspire 5738z
+	 * Touchpad stops working in mux mode when disabled and then
+	 * re-enabled with the touchpad enable/disable toggle hotkey
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Microsoft Virtual Machine */
+ /* Acer Aspire One 150 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Medion MAM 2070 */
+ /* Acer Aspire One 532h */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AO532h"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "blue"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M1022M netbook */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Wistron based laptops need us to explicitly enable the 'Dritek
+ * keyboard extension' to make their extra keys start generating scancodes.
+ * Originally, this was just confined to older laptops, but a few Acer laptops
+ * have turned up in 2007 that also need this again.
+ */
{
+ /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
- { }
-};
-
-/*
- * Some Fujitsu notebooks are having trouble with touchpads if
- * active multiplexing mode is activated. Luckily they don't have
- * external PS/2 ports so we can safely disable it.
- * ... apparently some Toshibas don't like MUX mode either and
- * die horrible death on reboot.
- */
-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{
- /* Fujitsu Lifebook P7010/P7010D */
+ /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P7010 */
+ /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P5020D */
+ /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S2000 */
+ /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S6230 */
+ /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Acer TravelMate 2490 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook U745 */
+ /* Acer TravelMate 4280 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu T70H */
+ /* Amoi M636/A737 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Lifebook E4010 */
+ /* Compal HEL80I */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * No data is coming from the touchscreen unless KBC
- * is in legacy mode.
- */
- /* Panasonic CF-29 */
+ /* Advent 4211 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /*
- * HP Pavilion DV4017EA -
- * errors on MUX ports are reported without raising AUXDATA
- * causing "spurious NAK" messages.
- */
+ /* Dell Embedded Box PC 3000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * HP Pavilion ZT1000 -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell XPS M1530 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * HP Pavilion DV4270ca -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell Vostro 1510 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
+ /* Dell Vostro 1320 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1520 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Sharp Actius MM20 */
+ /* Entroware Proteus */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Fujitsu notebooks are having trouble with touchpads if
+ * active multiplexing mode is activated. Luckily they don't have
+ * external PS/2 ports so we can safely disable it.
+ * ... apparently some Toshibas don't like MUX mode either and
+ * die horrible death on reboot.
+ */
{
- /* Sony Vaio FS-115b */
+ /* Fujitsu Lifebook P7010/P7010D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Sony Vaio FZ-240E -
- * reset and GET ID commands issued via KBD port are
- * sometimes being delivered to AUX3.
- */
+ /* Fujitsu Lifebook P5020D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Most (all?) VAIOs do not have external PS/2 ports nor
- * they implement active multiplexing properly, and
- * MUX discovery usually messes up keyboard/touchpad.
- */
+ /* Fujitsu Lifebook S2000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Amoi M636/A737 */
+ /* Fujitsu Lifebook S6230 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Lenovo 3000 n100 */
+ /* Fujitsu Lifebook T725 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Lenovo XiaoXin Air 12 */
+ /* Fujitsu Lifebook U745 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu T70H */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5710 */
+ /* Fujitsu A544 laptop */
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Acer Aspire 7738 */
+ /* Fujitsu AH544 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Gericom Bellagio */
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* IBM 2656 */
+ /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Dell XPS M1530 */
+ /* Fujitsu Lifebook P7010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Compal HEL80I */
+ /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1510 */
+ /* Fujitsu-Siemens Lifebook E4010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5536 */
+ /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro V13 */
+ /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Newer HP Pavilion dv4 models */
+ /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Asus X450LCP */
+ /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Avatar AVIU-145A6 */
+ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* TUXEDO BU1406 */
+ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad so
+ * that it gets detected, initialised and finally works.
+ */
{
- /* Lenovo LaVie Z */
+ /* Gigabyte P35 v2 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /*
- * Acer Aspire 5738z
- * Touchpad stops working in mux mode when dis- + re-enabled
- * with the touchpad enable/disable toggle hotkey
- */
+ /* Gigabyte P34 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Entroware Proteus */
+ /* Gigabyte P57 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gericom Bellagio */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
{
/*
- * Sony Vaio VGN-CS series require MUX or the touch sensor
- * buttons will disturb touchpad operation
+ * HP Pavilion DV4017EA -
+ * errors on MUX ports are reported without raising AUXDATA
+ * causing "spurious NAK" messages.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-/*
- * On some Asus laptops, just running self tests cause problems.
- */
-static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
+ /*
+ * HP Pavilion ZT1000 -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
},
- }, {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * HP Pavilion DV4270ca -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
- /* MSI Wind U-100 */
+ /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* LG Electronics X110 */
+ /* IBM 2656 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "X110"),
- DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire One 150 */
+ /* Avatar AVIU-145A6 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Intel MBO Desktop D845PESV */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Intel NUC D54250WYK - does not have i8042 controller but
+ * declares PS/2 devices in DSDT.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /* Lenovo 3000 n100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo XiaoXin Air 12 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo LaVie Z */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo Ideapad U455 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Lenovo ThinkPad L460 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Advent 4211 */
+ /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ /* LG Electronics X110 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya Mini E1210 */
@@ -680,6 +843,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya E1222 */
@@ -687,331 +851,434 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Mivvy M310 */
+ /* MSI Wind U-100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP)
},
{
- /* Dell Vostro 1320 */
+ /*
+ * No data is coming from the touchscreen unless KBC
+ * is in legacy mode.
+ */
+ /* Panasonic CF-29 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1520 */
+ /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Dell Vostro 1720 */
+ /* Microsoft Virtual Machine */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo Ideapad U455 */
+ /* Medion MAM 2070 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad L460 */
+ /* TUXEDO BU1406 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ /* OQO Model 01 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Entroware Proteus */
+ /* Acer Aspire 5 A515 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-#ifdef CONFIG_PNP
-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
{
- /* Intel MBO Desktop D845PESV */
+ /* ULI EV4873 - AUX LOOP does not work properly */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
/*
- * Intel NUC D54250WYK - does not have i8042 controller but
- * declares PS/2 devices in DSDT.
+ * Arima-Rioworks HDAMB -
+ * AUX LOOP command does not raise AUX IRQ
*/
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
+ DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* MSI Wind U-100 */
+ /* Sharp Actius MM20 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5 A515 */
+ /*
+ * Sony Vaio FZ-240E -
+ * reset and GET ID commands issued via KBD port are
+ * sometimes being delivered to AUX3.
+ */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * Most (all?) VAIOs do not have external PS/2 ports nor
+ * they implement active multiplexing properly, and
+ * MUX discovery usually messes up keyboard/touchpad.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
{
+ /* Sony Vaio FS-115b */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_FORCEMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-#endif
-
-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
{
- /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port so this can safely be set for
+ * all of them. These two are based on a Clevo design, but have the
+ * board_name changed.
+ */
{
- /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu A544 laptop */
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu AH544 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Mivvy M310 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad so
+ * that it gets detected, initialised and finally works.
+ */
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Fujitsu U574 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+ DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port so this can safely be set for
+ * all of them.
+ * Clevo barebones come with board_vendor and/or system_vendor set to
+ * either the very generic string "Notebook" or a different value
+ * for each individual reseller. The only somewhat universal way to
+ * identify them is by board_name.
+ */
{
- /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some Wistron based laptops need us to explicitly enable the 'Dritek
- * keyboard extension' to make their extra keys start generating scancodes.
- * Originally, this was just confined to older laptops, but a few Acer laptops
- * have turned up in 2007 that also need this again.
- */
-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{
- /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /*
+ * At least one modern Clevo barebone has the touchpad connected both
+ * via PS/2 and i2c interface. This causes a race condition between the
+ * psmouse and i2c-hid driver. Since the full capability of the touchpad
+ * is available via the i2c interface and the device has no external
+ * PS/2 port, it is safe to just ignore all PS/2 mice here to avoid
+ * this issue. The known affected device is the
+ * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of
+ * the two different dmi strings below. NS50MU is not a typo!
+ */
{
- /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 2490 */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 4280 */
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some laptops need keyboard reset before probing for the trackpad to get
- * it detected, initialised & finally work.
- */
-static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{
- /* Gigabyte P35 v2 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- {
- /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ {
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P34 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P57 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{ }
};
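Editor's aside, a hedged sketch rather than patch content: the "partial board_name" entries above work because DMI_MATCH is a substring test (dmi_matches() in drivers/firmware/dmi_scan.c uses strstr()), while DMI_EXACT_MATCH compares whole strings. A minimal illustration, with invented function names:

/* Illustrative only -- roughly what the DMI core does for the two
 * match types; dmi_matches() is the authoritative implementation.
 */
static bool example_dmi_substr_match(const char *dmi_val, const char *wanted)
{
	return strstr(dmi_val, wanted) != NULL;	/* DMI_MATCH semantics */
}

static bool example_dmi_exact_match(const char *dmi_val, const char *wanted)
{
	return strcmp(dmi_val, wanted) == 0;	/* DMI_EXACT_MATCH semantics */
}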
-static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+#ifdef CONFIG_PNP
+static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = {
{
- /* ASUS ZenBook UX425UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
- /* ASUS ZenBook UM325UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
{ }
};
+#endif
#endif /* CONFIG_X86 */
@@ -1167,11 +1434,6 @@ static int __init i8042_pnp_init(void)
bool pnp_data_busted = false;
int err;
-#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_nopnp_table))
- i8042_nopnp = true;
-#endif
-
if (i8042_nopnp) {
pr_info("PNP detection disabled\n");
return 0;
@@ -1275,6 +1537,59 @@ static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
#endif /* CONFIG_PNP */
+
+#ifdef CONFIG_X86
+static void __init i8042_check_quirks(void)
+{
+ const struct dmi_system_id *device_quirk_info;
+ uintptr_t quirks;
+
+ device_quirk_info = dmi_first_match(i8042_dmi_quirk_table);
+ if (!device_quirk_info)
+ return;
+
+ quirks = (uintptr_t)device_quirk_info->driver_data;
+
+ if (quirks & SERIO_QUIRK_NOKBD)
+ i8042_nokbd = true;
+ if (quirks & SERIO_QUIRK_NOAUX)
+ i8042_noaux = true;
+ if (quirks & SERIO_QUIRK_NOMUX)
+ i8042_nomux = true;
+ if (quirks & SERIO_QUIRK_FORCEMUX)
+ i8042_nomux = false;
+ if (quirks & SERIO_QUIRK_UNLOCK)
+ i8042_unlock = true;
+ if (quirks & SERIO_QUIRK_PROBE_DEFER)
+ i8042_probe_defer = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (quirks & SERIO_QUIRK_RESET_ALWAYS)
+ i8042_reset = I8042_RESET_ALWAYS;
+ if (quirks & SERIO_QUIRK_RESET_NEVER)
+ i8042_reset = I8042_RESET_NEVER;
+ }
+ if (quirks & SERIO_QUIRK_DIECT)
+ i8042_direct = true;
+ if (quirks & SERIO_QUIRK_DUMBKBD)
+ i8042_dumbkbd = true;
+ if (quirks & SERIO_QUIRK_NOLOOP)
+ i8042_noloop = true;
+ if (quirks & SERIO_QUIRK_NOTIMEOUT)
+ i8042_notimeout = true;
+ if (quirks & SERIO_QUIRK_KBDRESET)
+ i8042_kbdreset = true;
+ if (quirks & SERIO_QUIRK_DRITEK)
+ i8042_dritek = true;
+#ifdef CONFIG_PNP
+ if (quirks & SERIO_QUIRK_NOPNP)
+ i8042_nopnp = true;
+#endif
+}
+#else
+static inline void i8042_check_quirks(void) {}
+#endif
+
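One behavioural consequence of merging the quirk tables, noted here as a hedged aside (the entry below is hypothetical, not from the patch): dmi_first_match() consults only the first matching entry, so a machine that needs several quirks must carry them all OR-ed into a single driver_data bitmask rather than spread over multiple tables:

/* Illustrative entry shape -- vendor and product strings are made up.
 * Only the first match is consulted by i8042_check_quirks() above,
 * so all required SERIO_QUIRK_* bits go into one entry.
 */
static const struct dmi_system_id example_quirk_entry[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),	/* hypothetical */
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook"),	/* hypothetical */
		},
		.driver_data = (void *)(SERIO_QUIRK_NOMUX |
					SERIO_QUIRK_RESET_ALWAYS),
	},
	{ }
};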
static int __init i8042_platform_init(void)
{
int retval;
@@ -1297,45 +1612,42 @@ static int __init i8042_platform_init(void)
i8042_kbd_irq = I8042_MAP_IRQ(1);
i8042_aux_irq = I8042_MAP_IRQ(12);
- retval = i8042_pnp_init();
- if (retval)
- return retval;
-
#if defined(__ia64__)
- i8042_reset = I8042_RESET_ALWAYS;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
+ i8042_check_quirks();
+
+ pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ i8042_nokbd ? " nokbd" : "",
+ i8042_noaux ? " noaux" : "",
+ i8042_nomux ? " nomux" : "",
+ i8042_unlock ? " unlock" : "",
+ i8042_probe_defer ? " probe_defer" : "",
+ i8042_reset == I8042_RESET_DEFAULT ?
+ "" : i8042_reset == I8042_RESET_ALWAYS ?
+ " reset_always" : " reset_never",
+ i8042_direct ? " direct" : "",
+ i8042_dumbkbd ? " dumbkbd" : "",
+ i8042_noloop ? " noloop" : "",
+ i8042_notimeout ? " notimeout" : "",
+ i8042_kbdreset ? " kbdreset" : "",
#ifdef CONFIG_X86
- /* Honor module parameter when value is not default */
- if (i8042_reset == I8042_RESET_DEFAULT) {
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = I8042_RESET_ALWAYS;
-
- if (dmi_check_system(i8042_dmi_noselftest_table))
- i8042_reset = I8042_RESET_NEVER;
- }
-
- if (dmi_check_system(i8042_dmi_noloop_table))
- i8042_noloop = true;
-
- if (dmi_check_system(i8042_dmi_nomux_table))
- i8042_nomux = true;
-
- if (dmi_check_system(i8042_dmi_forcemux_table))
- i8042_nomux = false;
-
- if (dmi_check_system(i8042_dmi_notimeout_table))
- i8042_notimeout = true;
-
- if (dmi_check_system(i8042_dmi_dritek_table))
- i8042_dritek = true;
-
- if (dmi_check_system(i8042_dmi_kbdreset_table))
- i8042_kbdreset = true;
+ i8042_dritek ? " dritek" : "",
+#else
+ "",
+#endif
+#ifdef CONFIG_PNP
+ i8042_nopnp ? " nopnp" : "");
+#else
+ "");
+#endif
- if (dmi_check_system(i8042_dmi_probe_defer_table))
- i8042_probe_defer = true;
+ retval = i8042_pnp_init();
+ if (retval)
+ return retval;
+#ifdef CONFIG_X86
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index bb2e1cbffba7..82beddb28761 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/ratelimit.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -47,6 +48,8 @@
#define M09_REGISTER_NUM_X 0x94
#define M09_REGISTER_NUM_Y 0x95
+#define M12_REGISTER_REPORT_RATE 0x88
+
#define EV_REGISTER_THRESHOLD 0x40
#define EV_REGISTER_GAIN 0x41
#define EV_REGISTER_OFFSET_Y 0x45
@@ -127,9 +130,12 @@ struct edt_ft5x06_ts_data {
int max_support_points;
char name[EDT_NAME_LEN];
+ char fw_version[EDT_NAME_LEN];
struct edt_reg_addr reg_addr;
enum edt_ver version;
+ unsigned int crc_errors;
+ unsigned int header_errors;
};
struct edt_i2c_chip_data {
@@ -178,6 +184,7 @@ static bool edt_ft5x06_ts_check_crc(struct edt_ft5x06_ts_data *tsdata,
crc ^= buf[i];
if (crc != buf[buflen-1]) {
+ tsdata->crc_errors++;
dev_err_ratelimited(&tsdata->client->dev,
"crc error: 0x%02x expected, got 0x%02x\n",
crc, buf[buflen-1]);
@@ -235,6 +242,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
if (tsdata->version == EDT_M06) {
if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
rdbuf[2] != datalen) {
+ tsdata->header_errors++;
dev_err_ratelimited(dev,
"Unexpected header: %02x%02x%02x!\n",
rdbuf[0], rdbuf[1], rdbuf[2]);
@@ -523,9 +531,55 @@ static EDT_ATTR(offset_y, S_IWUSR | S_IRUGO, NO_REGISTER, NO_REGISTER,
/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
M09_REGISTER_THRESHOLD, EV_REGISTER_THRESHOLD, 0, 255);
-/* m06: range 3 to 14, m12: (0x64: 100Hz) */
+/* m06: range 3 to 14, m12: range 1 to 255 */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, NO_REGISTER, 0, 255);
+ M12_REGISTER_REPORT_RATE, NO_REGISTER, 0, 255);
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->name);
+}
+
+static DEVICE_ATTR_RO(model);
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->fw_version);
+}
+
+static DEVICE_ATTR_RO(fw_version);
+
+/* m06 only */
+static ssize_t header_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->header_errors);
+}
+
+static DEVICE_ATTR_RO(header_errors);
+
+/* m06 only */
+static ssize_t crc_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->crc_errors);
+}
+
+static DEVICE_ATTR_RO(crc_errors);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -534,6 +588,10 @@ static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_offset_y.dattr.attr,
&edt_ft5x06_attr_threshold.dattr.attr,
&edt_ft5x06_attr_report_rate.dattr.attr,
+ &dev_attr_model.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_header_errors.attr,
+ &dev_attr_crc_errors.attr,
NULL
};
@@ -820,13 +878,13 @@ static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#endif /* CONFIG_DEBUGFS */
static int edt_ft5x06_ts_identify(struct i2c_client *client,
- struct edt_ft5x06_ts_data *tsdata,
- char *fw_version)
+ struct edt_ft5x06_ts_data *tsdata)
{
u8 rdbuf[EDT_NAME_LEN];
char *p;
int error;
char *model_name = tsdata->name;
+ char *fw_version = tsdata->fw_version;
/* see what we find if we assume it is a M06 *
* if we get less than EDT_NAME_LEN, we don't want
@@ -1030,7 +1088,8 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
case EDT_M09:
case EDT_M12:
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
- reg_addr->reg_report_rate = NO_REGISTER;
+ reg_addr->reg_report_rate = tsdata->version == EDT_M12 ?
+ M12_REGISTER_REPORT_RATE : NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
reg_addr->reg_offset = M09_REGISTER_OFFSET;
reg_addr->reg_offset_x = NO_REGISTER;
@@ -1081,7 +1140,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
struct input_dev *input;
unsigned long irq_flags;
int error;
- char fw_version[EDT_NAME_LEN];
+ u32 report_rate;
dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n");
@@ -1194,7 +1253,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->input = input;
tsdata->factory_mode = false;
- error = edt_ft5x06_ts_identify(client, tsdata, fw_version);
+ error = edt_ft5x06_ts_identify(client, tsdata);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
return error;
@@ -1210,9 +1269,30 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);
+ if (tsdata->reg_addr.reg_report_rate != NO_REGISTER &&
+ !device_property_read_u32(&client->dev,
+ "report-rate-hz", &report_rate)) {
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate = clamp_val(report_rate, 30, 140);
+ else
+ tsdata->report_rate = clamp_val(report_rate, 1, 255);
+
+ if (report_rate != tsdata->report_rate)
+ dev_warn(&client->dev,
+ "report-rate %dHz is unsupported, use %dHz\n",
+ report_rate, tsdata->report_rate);
+
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate /= 10;
+
+ edt_ft5x06_register_write(tsdata,
+ tsdata->reg_addr.reg_report_rate,
+ tsdata->report_rate);
+ }
+
dev_dbg(&client->dev,
"Model \"%s\", Rev. \"%s\", %dx%d sensors\n",
- tsdata->name, fw_version, tsdata->num_x, tsdata->num_y);
+ tsdata->name, tsdata->fw_version, tsdata->num_x, tsdata->num_y);
input->name = tsdata->name;
input->id.bustype = BUS_I2C;
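A hedged illustration of the report-rate logic added above (the helper name is invented): on M06 the register holds the rate in units of 10 Hz, hence the clamp to the 30..140 Hz window followed by the divide by 10 before the register write:

/* Hypothetical helper mirroring the probe logic above; kernel context
 * assumed (clamp_val() from <linux/minmax.h>).
 */
static u8 example_m06_report_rate_reg(u32 requested_hz)
{
	return clamp_val(requested_hz, 30, 140) / 10;	/* 30..140 Hz -> 3..14 */
}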
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index cbe0dd412912..4b7eee01c6aa 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -220,6 +220,7 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
{
u8 buf[EXC3000_LEN_VENDOR_REQUEST] = { 0x67, 0x00, 0x42, 0x00, 0x03 };
int ret;
+ unsigned long time_left;
mutex_lock(&data->query_lock);
@@ -233,9 +234,9 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
goto out_unlock;
if (response) {
- ret = wait_for_completion_timeout(&data->wait_event,
- timeout * HZ);
- if (ret <= 0) {
+ time_left = wait_for_completion_timeout(&data->wait_event,
+ timeout * HZ);
+ if (time_left == 0) {
ret = -ETIMEDOUT;
goto out_unlock;
}
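The exc3000 change above follows a common cleanup pattern, sketched here with an invented function name: wait_for_completion_timeout() returns an unsigned long -- 0 on timeout, otherwise the jiffies remaining -- so storing the result in a signed int and testing "<= 0" was misleading; a dedicated unsigned variable keeps the test exact:

/* Illustrative only; kernel context assumed (<linux/completion.h>,
 * <linux/errno.h>). The canonical way to consume the unsigned
 * return value of wait_for_completion_timeout().
 */
static int example_wait_for_event(struct completion *done, unsigned int secs)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, secs * HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;	/* completed with time_left jiffies to spare */
}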
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index aa45a9fee6a0..21c0dddbe41d 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -95,6 +95,7 @@ static const struct goodix_chip_data gt9x_chip_data = {
static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "1151", .data = &gt1x_chip_data },
+ { .id = "1158", .data = &gt1x_chip_data },
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
@@ -822,22 +823,16 @@ static int goodix_resource(struct acpi_resource *ares, void *data)
struct device *dev = &ts->client->dev;
struct acpi_resource_gpio *gpio;
- switch (ares->type) {
- case ACPI_RESOURCE_TYPE_GPIO:
- gpio = &ares->data.gpio;
- if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) {
- if (ts->gpio_int_idx == -1) {
- ts->gpio_int_idx = ts->gpio_count;
- } else {
- dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
- ts->gpio_int_idx = -2;
- }
+ if (acpi_gpio_get_irq_resource(ares, &gpio)) {
+ if (ts->gpio_int_idx == -1) {
+ ts->gpio_int_idx = ts->gpio_count;
+ } else {
+ dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
+ ts->gpio_int_idx = -2;
}
ts->gpio_count++;
- break;
- default:
- break;
- }
+ } else if (acpi_gpio_get_io_resource(ares, &gpio))
+ ts->gpio_count++;
return 0;
}
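For context, a hedged re-statement of what the helper used above checks (the real acpi_gpio_get_irq_resource() lives in drivers/gpio/gpiolib-acpi.c and also returns the GPIO resource through its second argument):

/* Illustrative only -- roughly the condition the removed switch
 * open-coded for the interrupt case and the helper now hides.
 */
static bool example_is_gpio_int(const struct acpi_resource *ares)
{
	return ares->type == ACPI_RESOURCE_TYPE_GPIO &&
	       ares->data.gpio.connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
}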
@@ -1514,6 +1509,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt1158" },
{ .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 8bd03278ad9a..52f9e9eaab14 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -15,75 +15,75 @@
/* Register Map */
-#define BT541_SWRESET_CMD 0x0000
-#define BT541_WAKEUP_CMD 0x0001
+#define ZINITIX_SWRESET_CMD 0x0000
+#define ZINITIX_WAKEUP_CMD 0x0001
-#define BT541_IDLE_CMD 0x0004
-#define BT541_SLEEP_CMD 0x0005
+#define ZINITIX_IDLE_CMD 0x0004
+#define ZINITIX_SLEEP_CMD 0x0005
-#define BT541_CLEAR_INT_STATUS_CMD 0x0003
-#define BT541_CALIBRATE_CMD 0x0006
-#define BT541_SAVE_STATUS_CMD 0x0007
-#define BT541_SAVE_CALIBRATION_CMD 0x0008
-#define BT541_RECALL_FACTORY_CMD 0x000f
+#define ZINITIX_CLEAR_INT_STATUS_CMD 0x0003
+#define ZINITIX_CALIBRATE_CMD 0x0006
+#define ZINITIX_SAVE_STATUS_CMD 0x0007
+#define ZINITIX_SAVE_CALIBRATION_CMD 0x0008
+#define ZINITIX_RECALL_FACTORY_CMD 0x000f
-#define BT541_THRESHOLD 0x0020
+#define ZINITIX_THRESHOLD 0x0020
-#define BT541_LARGE_PALM_REJECT_AREA_TH 0x003F
+#define ZINITIX_LARGE_PALM_REJECT_AREA_TH 0x003F
-#define BT541_DEBUG_REG 0x0115 /* 0~7 */
+#define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */
-#define BT541_TOUCH_MODE 0x0010
-#define BT541_CHIP_REVISION 0x0011
-#define BT541_FIRMWARE_VERSION 0x0012
+#define ZINITIX_TOUCH_MODE 0x0010
+#define ZINITIX_CHIP_REVISION 0x0011
+#define ZINITIX_FIRMWARE_VERSION 0x0012
#define ZINITIX_USB_DETECT 0x116
-#define BT541_MINOR_FW_VERSION 0x0121
+#define ZINITIX_MINOR_FW_VERSION 0x0121
-#define BT541_VENDOR_ID 0x001C
-#define BT541_HW_ID 0x0014
+#define ZINITIX_VENDOR_ID 0x001C
+#define ZINITIX_HW_ID 0x0014
-#define BT541_DATA_VERSION_REG 0x0013
-#define BT541_SUPPORTED_FINGER_NUM 0x0015
-#define BT541_EEPROM_INFO 0x0018
-#define BT541_INITIAL_TOUCH_MODE 0x0019
+#define ZINITIX_DATA_VERSION_REG 0x0013
+#define ZINITIX_SUPPORTED_FINGER_NUM 0x0015
+#define ZINITIX_EEPROM_INFO 0x0018
+#define ZINITIX_INITIAL_TOUCH_MODE 0x0019
-#define BT541_TOTAL_NUMBER_OF_X 0x0060
-#define BT541_TOTAL_NUMBER_OF_Y 0x0061
+#define ZINITIX_TOTAL_NUMBER_OF_X 0x0060
+#define ZINITIX_TOTAL_NUMBER_OF_Y 0x0061
-#define BT541_DELAY_RAW_FOR_HOST 0x007f
+#define ZINITIX_DELAY_RAW_FOR_HOST 0x007f
-#define BT541_BUTTON_SUPPORTED_NUM 0x00B0
-#define BT541_BUTTON_SENSITIVITY 0x00B2
-#define BT541_DUMMY_BUTTON_SENSITIVITY 0X00C8
+#define ZINITIX_BUTTON_SUPPORTED_NUM 0x00B0
+#define ZINITIX_BUTTON_SENSITIVITY 0x00B2
+#define ZINITIX_DUMMY_BUTTON_SENSITIVITY 0X00C8
-#define BT541_X_RESOLUTION 0x00C0
-#define BT541_Y_RESOLUTION 0x00C1
+#define ZINITIX_X_RESOLUTION 0x00C0
+#define ZINITIX_Y_RESOLUTION 0x00C1
-#define BT541_POINT_STATUS_REG 0x0080
-#define BT541_ICON_STATUS_REG 0x00AA
+#define ZINITIX_POINT_STATUS_REG 0x0080
+#define ZINITIX_ICON_STATUS_REG 0x00AA
-#define BT541_POINT_COORD_REG (BT541_POINT_STATUS_REG + 2)
+#define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2)
-#define BT541_AFE_FREQUENCY 0x0100
-#define BT541_DND_N_COUNT 0x0122
-#define BT541_DND_U_COUNT 0x0135
+#define ZINITIX_AFE_FREQUENCY 0x0100
+#define ZINITIX_DND_N_COUNT 0x0122
+#define ZINITIX_DND_U_COUNT 0x0135
-#define BT541_RAWDATA_REG 0x0200
+#define ZINITIX_RAWDATA_REG 0x0200
-#define BT541_EEPROM_INFO_REG 0x0018
+#define ZINITIX_EEPROM_INFO_REG 0x0018
-#define BT541_INT_ENABLE_FLAG 0x00f0
-#define BT541_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
+#define ZINITIX_INT_ENABLE_FLAG 0x00f0
+#define ZINITIX_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
-#define BT541_BTN_WIDTH 0x016d
+#define ZINITIX_BTN_WIDTH 0x016d
-#define BT541_CHECKSUM_RESULT 0x012c
+#define ZINITIX_CHECKSUM_RESULT 0x012c
-#define BT541_INIT_FLASH 0x01d0
-#define BT541_WRITE_FLASH 0x01d1
-#define BT541_READ_FLASH 0x01d2
+#define ZINITIX_INIT_FLASH 0x01d0
+#define ZINITIX_WRITE_FLASH 0x01d1
+#define ZINITIX_READ_FLASH 0x01d2
#define ZINITIX_INTERNAL_FLAG_02 0x011e
#define ZINITIX_INTERNAL_FLAG_03 0x011f
@@ -196,13 +196,13 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
int i;
int error;
- error = zinitix_write_cmd(client, BT541_SWRESET_CMD);
+ error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD);
if (error) {
dev_err(&client->dev, "Failed to write reset command\n");
return error;
}
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG, 0x0);
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0);
if (error) {
dev_err(&client->dev,
"Failed to reset interrupt enable flag\n");
@@ -210,32 +210,32 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
}
/* initialize */
- error = zinitix_write_u16(client, BT541_X_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_X_RESOLUTION,
bt541->prop.max_x);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_Y_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_Y_RESOLUTION,
bt541->prop.max_y);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_SUPPORTED_FINGER_NUM,
+ error = zinitix_write_u16(client, ZINITIX_SUPPORTED_FINGER_NUM,
MAX_SUPPORTED_FINGER_NUM);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INITIAL_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG,
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG,
BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
BIT_UP);
if (error)
@@ -243,7 +243,7 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
/* clear queue */
for (i = 0; i < 10; i++) {
- zinitix_write_cmd(client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(client, ZINITIX_CLEAR_INT_STATUS_CMD);
udelay(10);
}
@@ -361,7 +361,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
memset(&touch_event, 0, sizeof(struct touch_event));
- error = zinitix_read_data(bt541->client, BT541_POINT_STATUS_REG,
+ error = zinitix_read_data(bt541->client, ZINITIX_POINT_STATUS_REG,
&touch_event, sizeof(struct touch_event));
if (error) {
dev_err(&client->dev, "Failed to read in touchpoint struct\n");
@@ -381,7 +381,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
input_sync(bt541->input_dev);
out:
- zinitix_write_cmd(bt541->client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(bt541->client, ZINITIX_CLEAR_INT_STATUS_CMD);
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c79a0df090c0..5c5cb5bee8b6 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -363,6 +363,16 @@ config ARM_SMMU_QCOM
When running on a Qualcomm platform that has the custom variant
of the ARM SMMU, this needs to be built into the SMMU driver.
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM
+ help
+ Support for implementation specific debug features in ARM SMMU
+ hardware found in QTI platforms.
+
+ Say Y here to enable debug for issues such as TLB sync timeouts
+ which require implementation-defined register dumps.
+
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1ab31074f5b3..84e5bb1bf01b 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -13,12 +13,13 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
+extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -114,10 +115,17 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
amd_iommu_domain_set_pt_root(domain, 0);
}
+static inline int get_pci_sbdf_id(struct pci_dev *pdev)
+{
+ int seg = pci_domain_nr(pdev->bus);
+ u16 devid = pci_dev_id(pdev);
+
+ return PCI_SEG_DEVID_TO_SBDF(seg, devid);
+}
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
-extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+extern int __init add_special_device(u8 type, u8 id, u32 *devid,
bool cmd_line);
#ifdef CONFIG_DMI
@@ -128,4 +136,10 @@ static inline void amd_iommu_apply_ivrs_quirks(void) { }
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
+extern struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
+
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
+extern bool amd_iommu_snp_en;
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 72d0f5e2f651..5b1019dab328 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -67,6 +67,7 @@
#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
+#define MMIO_EXT_FEATURES2 0x01A0
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -102,6 +103,12 @@
#define FEATURE_GLXVAL_SHIFT 14
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+/* Extended Feature 2 Bits */
+#define FEATURE_SNPAVICSUP_SHIFT 5
+#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
+#define FEATURE_SNPAVICSUP_GAM(x) \
+	((((x) & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT) == 0x1)
+
/* Note:
* The current driver only support 16-bit PASID.
* Currently, hardware only implement upto 16-bit PASID
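FEATURE_SNPAVICSUP_GAM() above extracts the 3-bit SNPAVICSup field at bit 5 of EFR2 and tests it for the value 1. A standalone check of the field arithmetic, with the macros copied from this hunk:

	#include <assert.h>
	#include <stdint.h>

	#define FEATURE_SNPAVICSUP_SHIFT	5
	#define FEATURE_SNPAVICSUP_MASK		(0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
	#define FEATURE_SNPAVICSUP_GAM(x) \
		((((x) & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT) == 0x1)

	int main(void)
	{
		uint64_t efr2 = 0x1ULL << FEATURE_SNPAVICSUP_SHIFT;	/* field = 1 */

		assert(FEATURE_SNPAVICSUP_GAM(efr2));		/* GAM mode supported */
		assert(!FEATURE_SNPAVICSUP_GAM(efr2 << 1));	/* field = 2: not GAM */
		return 0;
	}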
@@ -143,27 +150,28 @@
#define EVENT_FLAG_I 0x008
/* feature control bits */
-#define CONTROL_IOMMU_EN 0x00ULL
-#define CONTROL_HT_TUN_EN 0x01ULL
-#define CONTROL_EVT_LOG_EN 0x02ULL
-#define CONTROL_EVT_INT_EN 0x03ULL
-#define CONTROL_COMWAIT_EN 0x04ULL
-#define CONTROL_INV_TIMEOUT 0x05ULL
-#define CONTROL_PASSPW_EN 0x08ULL
-#define CONTROL_RESPASSPW_EN 0x09ULL
-#define CONTROL_COHERENT_EN 0x0aULL
-#define CONTROL_ISOC_EN 0x0bULL
-#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPRLOG_EN 0x0dULL
-#define CONTROL_PPRINT_EN 0x0eULL
-#define CONTROL_PPR_EN 0x0fULL
-#define CONTROL_GT_EN 0x10ULL
-#define CONTROL_GA_EN 0x11ULL
-#define CONTROL_GAM_EN 0x19ULL
-#define CONTROL_GALOG_EN 0x1CULL
-#define CONTROL_GAINT_EN 0x1DULL
-#define CONTROL_XT_EN 0x32ULL
-#define CONTROL_INTCAPXT_EN 0x33ULL
+#define CONTROL_IOMMU_EN 0
+#define CONTROL_HT_TUN_EN 1
+#define CONTROL_EVT_LOG_EN 2
+#define CONTROL_EVT_INT_EN 3
+#define CONTROL_COMWAIT_EN 4
+#define CONTROL_INV_TIMEOUT 5
+#define CONTROL_PASSPW_EN 8
+#define CONTROL_RESPASSPW_EN 9
+#define CONTROL_COHERENT_EN 10
+#define CONTROL_ISOC_EN 11
+#define CONTROL_CMDBUF_EN 12
+#define CONTROL_PPRLOG_EN 13
+#define CONTROL_PPRINT_EN 14
+#define CONTROL_PPR_EN 15
+#define CONTROL_GT_EN 16
+#define CONTROL_GA_EN 17
+#define CONTROL_GAM_EN 25
+#define CONTROL_GALOG_EN 28
+#define CONTROL_GAINT_EN 29
+#define CONTROL_XT_EN 50
+#define CONTROL_INTCAPXT_EN 51
+#define CONTROL_SNPAVIC_EN 61
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
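With the CONTROL_* constants above now expressed as plain bit positions instead of ULL values, enabling a feature reduces to a shift-and-or on the control register. A hedged sketch of that pattern (iommu_feature_enable itself is not part of this hunk; ctrl_set is a stand-in):

	#include <assert.h>
	#include <stdint.h>

	#define CONTROL_GA_EN		17
	#define CONTROL_GALOG_EN	28

	/* Stand-in for iommu_feature_enable(): set one control bit in a shadow word. */
	static inline uint64_t ctrl_set(uint64_t ctrl, unsigned int bit)
	{
		return ctrl | (1ULL << bit);
	}

	int main(void)
	{
		uint64_t ctrl = 0;

		ctrl = ctrl_set(ctrl, CONTROL_GA_EN);
		assert(ctrl == 0x20000);	/* bit 17 */
		return 0;
	}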
@@ -445,8 +453,6 @@ struct irq_remap_table {
u32 *table;
};
-extern struct irq_remap_table **irq_lookup_table;
-
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
@@ -456,6 +462,16 @@ extern bool amdr_ivrs_remap_support;
/* kmem_cache to get tables with 128 byte alignement */
extern struct kmem_cache *amd_iommu_irq_cache;
+#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
+#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
+#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
+ ((devid) & 0xffff))
+
+/* Make iterating over all PCI segments easier */
+#define for_each_pci_segment(pci_seg) \
+ list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+ list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
/*
* Make iterating over all IOMMUs easier
*/
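The SBDF helpers above pack a 16-bit PCI segment id and a 16-bit device id into a single u32. A minimal standalone sketch of the round-trip, with the three macros copied from this hunk (the printed values are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_SBDF_TO_SEGID(sbdf)		(((sbdf) >> 16) & 0xffff)
	#define PCI_SBDF_TO_DEVID(sbdf)		((sbdf) & 0xffff)
	#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((uint32_t)(seg) & 0xffff) << 16) | \
						   ((devid) & 0xffff))

	int main(void)
	{
		/* segment 0x0001, devid 0x00a5 (bus 00, dev 0x14, fn 5) */
		uint32_t sbdf = PCI_SEG_DEVID_TO_SBDF(0x0001, 0x00a5);

		printf("sbdf=%#x seg=%#x devid=%#x\n", sbdf,
		       PCI_SBDF_TO_SEGID(sbdf), PCI_SBDF_TO_DEVID(sbdf));
		return 0;	/* prints: sbdf=0x100a5 seg=0x1 devid=0xa5 */
	}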
@@ -478,13 +494,14 @@ extern struct kmem_cache *amd_iommu_irq_cache;
struct amd_iommu_fault {
u64 address; /* IO virtual address of the fault*/
u32 pasid; /* Address space identifier */
- u16 device_id; /* Originating PCI device id */
+ u32 sbdf; /* Originating PCI device id */
u16 tag; /* PPR tag */
u16 flags; /* Fault flags */
};
+struct amd_iommu;
struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;
@@ -531,6 +548,75 @@ struct protection_domain {
};
/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+ /* List with all PCI segments in the system */
+ struct list_head list;
+
+ /* List of all available dev_data structures */
+ struct llist_head dev_data_list;
+
+ /* PCI segment number */
+ u16 id;
+
+ /* Largest PCI device id we expect translation requests for */
+ u16 last_bdf;
+
+ /* Size of the device table */
+ u32 dev_table_size;
+
+ /* Size of the alias table */
+ u32 alias_table_size;
+
+ /* Size of the rlookup table */
+ u32 rlookup_table_size;
+
+ /*
+ * device table virtual address
+ *
+ * Pointer to the per PCI segment device table.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+ struct dev_table_entry *dev_table;
+
+ /*
+ * The rlookup iommu table is used to find the IOMMU which is
+ * responsible for a specific device. It is indexed by the PCI
+ * device id.
+ */
+ struct amd_iommu **rlookup_table;
+
+ /*
+ * This table is used to find the irq remapping table for a given
+ * device id quickly.
+ */
+ struct irq_remap_table **irq_lookup_table;
+
+ /*
+	 * Pointer to a device table to which the content of the old device
+	 * table will be copied. It is only used in the kdump kernel.
+ */
+ struct dev_table_entry *old_dev_tbl_cpy;
+
+ /*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+ u16 *alias_table;
+
+ /*
+ * A list of required unity mappings we find in ACPI. It is not locked
+	 * because at runtime it is only read. It is created at ACPI table
+ * parsing time.
+ */
+ struct list_head unity_map;
+};
+
+/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
*/
@@ -567,6 +653,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* Extended features 2 */
+ u64 features2;
+
/* IOMMUv2 */
bool is_iommu_v2;
@@ -581,7 +670,7 @@ struct amd_iommu {
u16 cap_ptr;
/* pci domain of this IOMMU */
- u16 pci_seg;
+ struct amd_iommu_pci_seg *pci_seg;
/* start of exclusion range of that IOMMU */
u64 exclusion_start;
@@ -666,8 +755,8 @@ struct acpihid_map_entry {
struct list_head list;
u8 uid[ACPIHID_UID_LEN];
u8 hid[ACPIHID_HID_LEN];
- u16 devid;
- u16 root_devid;
+ u32 devid;
+ u32 root_devid;
bool cmd_line;
struct iommu_group *group;
};
@@ -675,7 +764,7 @@ struct acpihid_map_entry {
struct devid_map {
struct list_head list;
u8 id;
- u16 devid;
+ u32 devid;
bool cmd_line;
};
@@ -689,7 +778,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
- struct pci_dev *pdev;
+ struct device *dev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
struct {
@@ -710,6 +799,12 @@ extern struct list_head hpet_map;
extern struct list_head acpihid_map;
/*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
+/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
*/
@@ -749,38 +844,12 @@ struct unity_map_entry {
};
/*
- * List of all unity mappings. It is not locked because as runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
* Data structures for device handling
*/
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to find requestor ids to device ids. Not locked because only
- * read on runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -913,6 +982,7 @@ struct irq_2_irte {
struct amd_ir_data {
u32 cached_ga_tag;
+ struct amd_iommu *iommu;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */
@@ -930,9 +1000,9 @@ struct amd_ir_data {
struct amd_irte_ops {
void (*prepare)(void *, u32, bool, u8, u32, int);
- void (*activate)(void *, u16, u16);
- void (*deactivate)(void *, u16, u16);
- void (*set_affinity)(void *, u16, u16, u8, u32);
+ void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
void *(*get)(struct irq_remap_table *, int);
void (*set_allocated)(struct irq_remap_table *, int);
bool (*is_allocated)(struct irq_remap_table *, int);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1d08f87e734b..fdc642362c14 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -84,6 +84,10 @@
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 2000000
+
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) ((((seg) & 0xffff) << 16) | (((bus) & 0xff) << 8) \
+					     | (((dev) & 0x1f) << 3) | ((fn) & 0x7))
+
/*
* ACPI table definitions
*
@@ -110,7 +114,7 @@ struct ivhd_header {
/* Following only valid on IVHD type 11h and 40h */
u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
- u64 res;
+ u64 efr_reg2;
} __attribute__((packed));
/*
@@ -141,7 +145,8 @@ struct ivmd_header {
u16 length;
u16 devid;
u16 aux;
- u64 resv;
+ u16 pci_seg;
+ u8 resv[6];
u64 range_start;
u64 range_length;
} __attribute__((packed));
@@ -159,11 +164,15 @@ static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;
-u16 amd_iommu_last_bdf; /* largest PCI device id we have
- to handle */
-LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
- we find in ACPI */
+/* Global EFR and EFR2 registers */
+u64 amd_iommu_efr;
+u64 amd_iommu_efr2;
+/* SNP is enabled on the system? */
+bool amd_iommu_snp_en;
+EXPORT_SYMBOL(amd_iommu_snp_en);
+
+LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -186,47 +195,11 @@ bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-/*
- * Pointer to a device table which the content of old device table
- * will be copied to. It's only be used in kdump kernel.
- */
-static struct dev_table_entry *old_dev_tbl_cpy;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
-/*
- * The rlookup table is used to find the IOMMU which is responsible
- * for a specific device. It is also indexed by the PCI device id.
- */
-struct amd_iommu **amd_iommu_rlookup_table;
-
-/*
- * This table is used to find the irq remapping table for a given device id
- * quickly.
- */
-struct irq_remap_table **irq_lookup_table;
-
-/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
-static u32 dev_table_size; /* size of the device table */
-static u32 alias_table_size; /* size of the alias table */
-static u32 rlookup_table_size; /* size if the rlookup table */
-
enum iommu_init_state {
IOMMU_START_STATE,
IOMMU_IVRS_DETECTED,
@@ -256,7 +229,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
-static void init_device_table_dma(void);
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
static bool amd_iommu_pre_enabled = true;
@@ -281,16 +254,10 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline void update_last_devid(u16 devid)
-{
- if (devid > amd_iommu_last_bdf)
- amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
{
unsigned shift = PAGE_SHIFT +
- get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+ get_order((last_bdf + 1) * entry_size);
return 1UL << shift;
}
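tbl_size() now takes the per-segment last_bdf and rounds the table up to a whole power-of-two number of pages. A standalone check of the arithmetic, assuming 4 KiB pages and 32-byte device table entries (the kernel's get_order() is re-derived locally):

	#include <stdio.h>

	/* Local stand-in for get_order(): log2 of 4 KiB pages needed, rounded up. */
	static int order_of(unsigned long size)
	{
		unsigned long pages = (size + 4095) / 4096;
		int order = 0;

		while ((1UL << order) < pages)
			order++;
		return order;
	}

	static unsigned long tbl_size(int entry_size, int last_bdf)
	{
		unsigned int shift = 12 + order_of((last_bdf + 1UL) * entry_size);

		return 1UL << shift;
	}

	int main(void)
	{
		/* 32-byte DTEs over the full 16-bit BDF space: a 2 MiB device table */
		printf("%lu\n", tbl_size(32, 0xffff));	/* prints 2097152 */
		return 0;
	}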
@@ -300,21 +267,46 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
-#ifdef CONFIG_IRQ_REMAP
-static bool check_feature_on_all_iommus(u64 mask)
+/*
+ * Iterate through all the IOMMUs to compute the common EFR
+ * masks and warn if any inconsistency is found.
+ */
+static void get_global_efr(void)
{
- bool ret = false;
struct amd_iommu *iommu;
for_each_iommu(iommu) {
- ret = iommu_feature(iommu, mask);
- if (!ret)
- return false;
+ u64 tmp = iommu->features;
+ u64 tmp2 = iommu->features2;
+
+ if (list_is_first(&iommu->list, &amd_iommu_list)) {
+ amd_iommu_efr = tmp;
+ amd_iommu_efr2 = tmp2;
+ continue;
+ }
+
+ if (amd_iommu_efr == tmp &&
+ amd_iommu_efr2 == tmp2)
+ continue;
+
+ pr_err(FW_BUG
+ "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
+ tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
+ iommu->index, iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
+ PCI_FUNC(iommu->devid));
+
+ amd_iommu_efr &= tmp;
+ amd_iommu_efr2 &= tmp2;
}
- return true;
+ pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
+}
+
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ return !!(amd_iommu_efr & mask);
}
-#endif
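get_global_efr() above intersects the per-IOMMU feature words, so a feature bit survives only if every IOMMU advertises it. A toy illustration of that AND-accumulation with made-up EFR values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t efrs[] = { 0xb, 0x9, 0xd };	/* hypothetical per-IOMMU EFRs */
		uint64_t global = efrs[0];

		for (int i = 1; i < 3; i++)
			global &= efrs[i];

		assert(global == 0x9);	/* only bits common to every IOMMU remain */
		return 0;
	}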
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
@@ -324,8 +316,10 @@ static bool check_feature_on_all_iommus(u64 mask)
static void __init early_iommu_features_init(struct amd_iommu *iommu,
struct ivhd_header *h)
{
- if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
iommu->features = h->efr_reg;
+ iommu->features2 = h->efr_reg2;
+ }
if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
amdr_ivrs_remap_support = true;
}
@@ -399,7 +393,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
u64 entry = start & PM_ADDR_MASK;
- if (!iommu_feature(iommu, FEATURE_SNP))
+ if (!check_feature_on_all_iommus(FEATURE_SNP))
return;
/* Note:
@@ -421,10 +415,12 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
+ u32 dev_table_size = iommu->pci_seg->dev_table_size;
+ void *dev_table = (void *)get_dev_table(iommu);
BUG_ON(iommu->mmio_base == NULL);
- entry = iommu_virt_to_phys(amd_iommu_dev_table);
+ entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
&entry, sizeof(entry));
@@ -557,6 +553,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
u8 *p = (void *)h, *end = (void *)h;
struct ivhd_entry *dev;
+ int last_devid = -EINVAL;
u32 ivhd_size = get_ivhd_header_size(h);
@@ -573,14 +570,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
switch (dev->type) {
case IVHD_DEV_ALL:
/* Use maximum BDF value for DEV_ALL */
- update_last_devid(0xffff);
- break;
+ return 0xffff;
case IVHD_DEV_SELECT:
case IVHD_DEV_RANGE_END:
case IVHD_DEV_ALIAS:
case IVHD_DEV_EXT_SELECT:
/* all the above subfield types refer to device ids */
- update_last_devid(dev->devid);
+ if (dev->devid > last_devid)
+ last_devid = dev->devid;
break;
default:
break;
@@ -590,7 +587,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
WARN_ON(p != end);
- return 0;
+ return last_devid;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
@@ -614,38 +611,125 @@ static int __init check_ivrs_checksum(struct acpi_table_header *table)
* id which we need to handle. This is the first of three functions which parse
* the ACPI table. So we check the checksum here.
*/
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
+static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
{
u8 *p = (u8 *)table, *end = (u8 *)table;
struct ivhd_header *h;
+ int last_devid, last_bdf = 0;
p += IVRS_HEADER_LENGTH;
end += table->length;
while (p < end) {
h = (struct ivhd_header *)p;
- if (h->type == amd_iommu_target_ivhd_type) {
- int ret = find_last_devid_from_ivhd(h);
-
- if (ret)
- return ret;
+ if (h->pci_seg == pci_seg &&
+ h->type == amd_iommu_target_ivhd_type) {
+ last_devid = find_last_devid_from_ivhd(h);
+
+ if (last_devid < 0)
+ return -EINVAL;
+ if (last_devid > last_bdf)
+ last_bdf = last_devid;
}
p += h->length;
}
WARN_ON(p != end);
- return 0;
+ return last_bdf;
}
/****************************************************************************
*
* The following functions belong to the code path which parses the ACPI table
* the second time. In this ACPI parsing iteration we allocate IOMMU specific
- * data structures, initialize the device/alias/rlookup table and also
- * basically initialize the hardware.
+ * data structures, initialize the per PCI segment device/alias/rlookup table
+ * and also basically initialize the hardware.
*
****************************************************************************/
+/* Allocate per PCI segment device table */
+static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
+ if (!pci_seg->dev_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = NULL;
+}
+
+/* Allocate per PCI segment IOMMU rlookup table. */
+static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ if (pci_seg->rlookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->rlookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = NULL;
+}
+
+static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->irq_lookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ kmemleak_alloc(pci_seg->irq_lookup_table,
+ pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ if (pci_seg->irq_lookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ kmemleak_free(pci_seg->irq_lookup_table);
+ free_pages((unsigned long)pci_seg->irq_lookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->irq_lookup_table = NULL;
+}
+
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ int i;
+
+ pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(pci_seg->alias_table_size));
+ if (!pci_seg->alias_table)
+ return -ENOMEM;
+
+ /*
+	 * Let each alias entry point to itself.
+ */
+ for (i = 0; i <= pci_seg->last_bdf; ++i)
+ pci_seg->alias_table[i] = i;
+
+ return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->alias_table,
+ get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = NULL;
+}
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -724,7 +808,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
void *buf = (void *)__get_free_pages(gfp, order);
if (buf &&
- iommu_feature(iommu, FEATURE_SNP) &&
+ check_feature_on_all_iommus(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
free_pages((unsigned long)buf, order);
buf = NULL;
@@ -815,20 +899,15 @@ static void free_ga_log(struct amd_iommu *iommu)
#endif
}
+#ifdef CONFIG_IRQ_REMAP
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
u32 status, i;
u64 entry;
if (!iommu->ga_log)
return -EINVAL;
- /* Check if already running */
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
- return 0;
-
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
@@ -852,13 +931,12 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
if (WARN_ON(i >= LOOP_TIMEOUT))
return -EINVAL;
-#endif /* CONFIG_IRQ_REMAP */
+
return 0;
}
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
@@ -876,10 +954,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-#else
- return 0;
-#endif /* CONFIG_IRQ_REMAP */
}
+#endif /* CONFIG_IRQ_REMAP */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
@@ -916,56 +992,59 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void set_dev_entry_bit(u16 devid, u8 bit)
+static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
+ dev_table[devid].data[i] |= (1UL << _bit);
+}
+
+static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ return __set_dev_entry_bit(dev_table, devid, bit);
}
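__set_dev_entry_bit() addresses a 256-bit device table entry as four u64 words: bits 7:6 of the bit number select the word and bits 5:0 the bit within it. A standalone check of that indexing, assuming the four-word entry layout:

	#include <assert.h>
	#include <stdint.h>

	struct dte { uint64_t data[4]; };	/* 256-bit device table entry */

	static void set_dte_bit(struct dte *e, uint8_t bit)
	{
		int i    = (bit >> 6) & 0x03;	/* which u64 word */
		int _bit = bit & 0x3f;		/* bit within that word */

		e->data[i] |= 1ULL << _bit;
	}

	int main(void)
	{
		struct dte e = { { 0 } };

		set_dte_bit(&e, 98);	/* 98 = word 1, bit 34 */
		assert(e.data[1] == 1ULL << 34);
		return 0;
	}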
-static int get_dev_entry_bit(u16 devid, u8 bit)
+static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
+ return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
+static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
-static bool copy_device_table(void)
+ return __get_dev_entry_bit(dev_table, devid, bit);
+}
+
+static bool __copy_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
+ u64 int_ctl, int_tab_len, entry = 0;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
struct dev_table_entry *old_devtb = NULL;
u32 lo, hi, devid, old_devtb_size;
phys_addr_t old_devtb_phys;
- struct amd_iommu *iommu;
u16 dom_id, dte_v, irq_v;
gfp_t gfp_flag;
u64 tmp;
- if (!amd_iommu_pre_enabled)
- return false;
-
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
- for_each_iommu(iommu) {
- /* All IOMMUs should use the same device table with the same size */
- lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
- hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
- entry = (((u64) hi) << 32) + lo;
- if (last_entry && last_entry != entry) {
- pr_err("IOMMU:%d should use the same dev table as others!\n",
- iommu->index);
- return false;
- }
- last_entry = entry;
+	/* Each IOMMU uses a separate device table of the same size */
+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+ hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+ entry = (((u64) hi) << 32) + lo;
- old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
- if (old_devtb_size != dev_table_size) {
- pr_err("The device table size of IOMMU:%d is not expected!\n",
- iommu->index);
- return false;
- }
+ old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+ if (old_devtb_size != pci_seg->dev_table_size) {
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
+ iommu->index);
+ return false;
}
/*
@@ -981,38 +1060,38 @@ static bool copy_device_table(void)
}
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
- dev_table_size)
- : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+ pci_seg->dev_table_size)
+ : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
if (!old_devtb)
return false;
gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
- old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
- get_order(dev_table_size));
- if (old_dev_tbl_cpy == NULL) {
+ pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+ get_order(pci_seg->dev_table_size));
+ if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
return false;
}
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- old_dev_tbl_cpy[devid] = old_devtb[devid];
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
if (dte_v && dom_id) {
- old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+ pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+ pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
tmp |= DTE_FLAG_GV;
- old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1027,7 +1106,7 @@ static bool copy_device_table(void)
return false;
}
- old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+ pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
}
}
memunmap(old_devtb);
@@ -1035,21 +1114,42 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(u16 devid)
+static bool copy_device_table(void)
{
- int sysmgt;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
- sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+ if (!amd_iommu_pre_enabled)
+ return false;
- if (sysmgt == 0x01)
- set_dev_entry_bit(devid, DEV_ENTRY_IW);
+ pr_warn("Translation is already enabled - trying to copy translation structures\n");
+
+ /*
+	 * All IOMMUs within a PCI segment share a common device table.
+	 * Hence copy the device table only once per PCI segment.
+ */
+ for_each_pci_segment(pci_seg) {
+ for_each_iommu(iommu) {
+ if (pci_seg->id != iommu->pci_seg->id)
+ continue;
+ if (!__copy_device_table(iommu))
+ return false;
+ break;
+ }
+ }
+
+ return true;
}
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
{
- amd_iommu_rlookup_table[devid] = iommu;
+ int sysmgt;
+
+ sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
+ (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+
+ if (sysmgt == 0x01)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
}
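The erratum 63 helper combines the two SYSMGT DTE bits into a 2-bit field and forces DEV_ENTRY_IW only when the field equals 01b. A small truth-table check of that combination logic:

	#include <assert.h>

	/* sysmgt = SYSMGT1 | (SYSMGT2 << 1); erratum 63 applies only to 01b */
	static int erratum63_needs_iw(int sysmgt1, int sysmgt2)
	{
		int sysmgt = sysmgt1 | (sysmgt2 << 1);

		return sysmgt == 0x01;
	}

	int main(void)
	{
		assert(erratum63_needs_iw(1, 0));	/* 01b: set DEV_ENTRY_IW */
		assert(!erratum63_needs_iw(0, 0));	/* 00b */
		assert(!erratum63_needs_iw(1, 1));	/* 11b */
		return 0;
	}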
/*
@@ -1060,26 +1160,26 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
u16 devid, u32 flags, u32 ext_flags)
{
if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
- set_iommu_for_device(iommu, devid);
+ amd_iommu_set_rlookup_table(iommu, devid);
}
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
@@ -1116,7 +1216,7 @@ int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
return 0;
}
-static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
bool cmd_line)
{
struct acpihid_map_entry *entry;
@@ -1195,10 +1295,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 devid = 0, devid_start = 0, devid_to = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u32 ivhd_size;
int ret;
@@ -1230,19 +1331,21 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
while (p < end) {
e = (struct ivhd_entry *)p;
+ seg_id = pci_seg->id;
+
switch (e->type) {
case IVHD_DEV_ALL:
DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
- for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+ for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1253,8 +1356,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SELECT_RANGE_START:
DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1266,9 +1369,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
@@ -1280,18 +1383,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
devid_to = e->ext >> 8;
set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
- amd_iommu_alias_table[devid] = devid_to;
+ pci_seg->alias_table[devid] = devid_to;
break;
case IVHD_DEV_ALIAS_RANGE:
DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %02x:%02x.%x flags: %02x "
- "devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x "
+ "devid_to: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS_NUM(e->ext >> 8),
+ seg_id, PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -1303,9 +1406,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1317,8 +1420,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_EXT_SELECT_RANGE:
DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1330,15 +1433,15 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
if (alias) {
- amd_iommu_alias_table[dev_i] = devid_to;
+ pci_seg->alias_table[dev_i] = devid_to;
set_dev_entry_from_acpi(iommu,
devid_to, flags, ext_flags);
}
@@ -1349,11 +1452,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SPECIAL: {
u8 handle, type;
const char *var;
- u16 devid;
+ u32 devid;
int ret;
handle = e->ext & 0xff;
- devid = (e->ext >> 8) & 0xffff;
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
type = (e->ext >> 24) & 0xff;
if (type == IVHD_SPECIAL_IOAPIC)
@@ -1363,9 +1466,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
var, (int)handle,
- PCI_BUS_NUM(devid),
+ seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1383,7 +1486,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
case IVHD_DEV_ACPI_HID: {
- u16 devid;
+ u32 devid;
u8 hid[ACPIHID_HID_LEN];
u8 uid[ACPIHID_UID_LEN];
int ret;
@@ -1426,9 +1529,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- devid = e->devid;
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
- hid, uid,
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1458,6 +1561,74 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
return 0;
}
+/* Allocate PCI segment data structure */
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int last_bdf;
+
+ /*
+ * First parse ACPI tables to find the largest Bus/Dev/Func we need to
+	 * handle in this PCI segment. Based on this information, the shared
+	 * data structures for this PCI segment are then allocated.
+ */
+ last_bdf = find_last_devid_acpi(ivrs_base, id);
+ if (last_bdf < 0)
+ return NULL;
+
+ pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
+ if (pci_seg == NULL)
+ return NULL;
+
+ pci_seg->last_bdf = last_bdf;
+ DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+ pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+
+ pci_seg->id = id;
+ init_llist_head(&pci_seg->dev_data_list);
+ INIT_LIST_HEAD(&pci_seg->unity_map);
+ list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
+
+ if (alloc_dev_table(pci_seg))
+ return NULL;
+ if (alloc_alias_table(pci_seg))
+ return NULL;
+ if (alloc_rlookup_table(pci_seg))
+ return NULL;
+
+ return pci_seg;
+}
+
+static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == id)
+ return pci_seg;
+ }
+
+ return alloc_pci_segment(id, ivrs_base);
+}
+
+static void __init free_pci_segments(void)
+{
+ struct amd_iommu_pci_seg *pci_seg, *next;
+
+ for_each_pci_segment_safe(pci_seg, next) {
+ list_del(&pci_seg->list);
+ free_irq_lookup_table(pci_seg);
+ free_rlookup_table(pci_seg);
+ free_alias_table(pci_seg);
+ free_dev_table(pci_seg);
+ kfree(pci_seg);
+ }
+}
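free_pci_segments() iterates with the _safe variant because each pass unlinks and frees the current node. A minimal sketch of the same delete-while-iterating idea on a plain singly linked list (the kernel's list.h API is assumed, not reproduced):

	#include <stdlib.h>

	struct seg {
		struct seg *next;
	};

	/* Free every node; the successor is saved before the current node dies. */
	static void free_all_segs(struct seg *head)
	{
		struct seg *cur = head, *next;

		while (cur) {
			next = cur->next;	/* the "safe" part of _safe iteration */
			free(cur);
			cur = next;
		}
	}

	int main(void)
	{
		struct seg *a = calloc(1, sizeof(*a));
		struct seg *b = calloc(1, sizeof(*b));

		if (!a || !b)
			return 1;
		a->next = b;
		free_all_segs(a);
		return 0;
	}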
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_cwwb_sem(iommu);
@@ -1542,9 +1713,15 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
*/
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
+ struct acpi_table_header *ivrs_base)
{
- int ret;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+ iommu->pci_seg = pci_seg;
raw_spin_lock_init(&iommu->lock);
iommu->cmd_sem_val = 0;
@@ -1566,7 +1743,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
*/
iommu->devid = h->devid;
iommu->cap_ptr = h->cap_ptr;
- iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
switch (h->type) {
@@ -1621,6 +1797,13 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
+ return init_iommu_from_acpi(iommu, h);
+}
+
+static int __init init_iommu_one_late(struct amd_iommu *iommu)
+{
+ int ret;
+
if (alloc_cwwb_sem(iommu))
return -ENOMEM;
@@ -1642,10 +1825,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (amd_iommu_pre_enabled)
amd_iommu_pre_enabled = translation_pre_enabled(iommu);
- ret = init_iommu_from_acpi(iommu, h);
- if (ret)
- return ret;
-
if (amd_iommu_irq_remap) {
ret = amd_iommu_create_irq_domain(iommu);
if (ret)
@@ -1656,7 +1835,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
* Make sure IOMMU is not considered to translate itself. The IVRS
* table tells us so, but this is a lie!
*/
- amd_iommu_rlookup_table[iommu->devid] = NULL;
+ iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
return 0;
}
@@ -1701,15 +1880,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
end += table->length;
p += IVRS_HEADER_LENGTH;
+ /* Phase 1: Process all IVHD blocks */
while (p < end) {
h = (struct ivhd_header *)p;
if (*p == amd_iommu_target_ivhd_type) {
- DUMP_printk("device: %02x:%02x.%01x cap: %04x "
- "seg: %d flags: %01x info %04x\n",
- PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
- PCI_FUNC(h->devid), h->cap_ptr,
- h->pci_seg, h->flags, h->info);
+ DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
+ "flags: %01x info %04x\n",
+ h->pci_seg, PCI_BUS_NUM(h->devid),
+ PCI_SLOT(h->devid), PCI_FUNC(h->devid),
+ h->cap_ptr, h->flags, h->info);
DUMP_printk(" mmio-addr: %016llx\n",
h->mmio_phys);
@@ -1717,7 +1897,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
if (iommu == NULL)
return -ENOMEM;
- ret = init_iommu_one(iommu, h);
+ ret = init_iommu_one(iommu, h, table);
if (ret)
return ret;
}
@@ -1726,6 +1906,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
}
WARN_ON(p != end);
+	/* Phase 2: Early feature support check */
+ get_global_efr();
+
+	/* Phase 3: Enable IOMMU features */
+ for_each_iommu(iommu) {
+ ret = init_iommu_one_late(iommu);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1762,7 +1952,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
char *buf)
{
struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->features);
+ return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
@@ -1789,16 +1979,18 @@ static const struct attribute_group *amd_iommu_groups[] = {
*/
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
- u64 features;
+ u64 features, features2;
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return;
/* read extended feature bits */
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
if (!iommu->features) {
iommu->features = features;
+ iommu->features2 = features2;
return;
}
@@ -1806,9 +1998,13 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* Sanity check and warn if EFR values from
* IVHD and MMIO conflict.
*/
- if (features != iommu->features)
- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
- features, iommu->features);
+ if (features != iommu->features ||
+ features2 != iommu->features2) {
+ pr_warn(FW_WARN
+ "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
+ features, iommu->features,
+ features2, iommu->features2);
+ }
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
@@ -1816,7 +2012,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int cap_ptr = iommu->cap_ptr;
int ret;
- iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
+ iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;
@@ -1863,10 +2060,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga_log(iommu);
- if (ret)
- return ret;
-
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
pr_info("Using strict mode due to virtualization\n");
iommu_set_dma_strict();
@@ -1879,7 +2072,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int i, j;
iommu->root_pdev =
- pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
+ pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ iommu->dev->bus->number,
PCI_DEVFN(0, 0));
/*
@@ -1906,8 +2100,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
- iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
+ if (ret)
+ return ret;
+
iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
@@ -1928,7 +2125,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("Extended features (%#llx):", iommu->features);
+ pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
@@ -1938,13 +2135,14 @@ static void print_iommu_info(void)
if (iommu->features & FEATURE_GAM_VAPIC)
pr_cont(" GA_vAPIC");
+ if (iommu->features & FEATURE_SNP)
+ pr_cont(" SNP");
+
pr_cont("\n");
}
}
if (irq_remapping_enabled) {
pr_info("Interrupt remapping enabled\n");
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- pr_info("Virtual APIC enabled\n");
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
@@ -1953,6 +2151,7 @@ static void print_iommu_info(void)
static int __init amd_iommu_init_pci(void)
{
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
int ret;
for_each_iommu(iommu) {
@@ -1983,7 +2182,8 @@ static int __init amd_iommu_init_pci(void)
goto out;
}
- init_device_table_dma();
+ for_each_pci_segment(pci_seg)
+ init_device_table_dma(pci_seg);
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
@@ -2232,9 +2432,6 @@ enable_faults:
if (iommu->ppr_log != NULL)
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-
- iommu_ga_log_enable(iommu);
-
return 0;
}
@@ -2249,19 +2446,28 @@ enable_faults:
static void __init free_unity_maps(void)
{
struct unity_map_entry *entry, *next;
+ struct amd_iommu_pci_seg *p, *pci_seg;
- list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
- list_del(&entry->list);
- kfree(entry);
+ for_each_pci_segment_safe(pci_seg, p) {
+ list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
}
}
/* called for unity map ACPI definition */
-static int __init init_unity_map_range(struct ivmd_header *m)
+static int __init init_unity_map_range(struct ivmd_header *m,
+ struct acpi_table_header *ivrs_base)
{
struct unity_map_entry *e = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
char *s;
+ pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -2277,7 +2483,7 @@ static int __init init_unity_map_range(struct ivmd_header *m)
case ACPI_IVMD_TYPE_ALL:
s = "IVMD_TYPE_ALL\t\t";
e->devid_start = 0;
- e->devid_end = amd_iommu_last_bdf;
+ e->devid_end = pci_seg->last_bdf;
break;
case ACPI_IVMD_TYPE_RANGE:
s = "IVMD_TYPE_RANGE\t\t";
@@ -2299,14 +2505,16 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (m->flags & IVMD_FLAG_EXCL_RANGE)
e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
- DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
- " range_start: %016llx range_end: %016llx flags: %x\n", s,
+ DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
+ "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
+ " flags: %x\n", s, m->pci_seg,
PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
+ PCI_FUNC(e->devid_start), m->pci_seg,
+ PCI_BUS_NUM(e->devid_end),
PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e->address_start, e->address_end, m->flags);
- list_add_tail(&e->list, &amd_iommu_unity_map);
+ list_add_tail(&e->list, &pci_seg->unity_map);
return 0;
}
@@ -2323,7 +2531,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
- init_unity_map_range(m);
+ init_unity_map_range(m, table);
p += m->length;
}
@@ -2334,35 +2542,48 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
/*
* Init the device table to not allow DMA access for devices
*/
-static void init_device_table_dma(void)
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- set_dev_entry_bit(devid, DEV_ENTRY_VALID);
- set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+ if (dev_table == NULL)
+ return;
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ if (!amd_iommu_snp_en)
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
}
}
-static void __init uninit_device_table_dma(void)
+static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+ if (dev_table == NULL)
+ return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- amd_iommu_dev_table[devid].data[0] = 0ULL;
- amd_iommu_dev_table[devid].data[1] = 0ULL;
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ dev_table[devid].data[0] = 0ULL;
+ dev_table[devid].data[1] = 0ULL;
}
}
static void init_device_table(void)
{
+ struct amd_iommu_pci_seg *pci_seg;
u32 devid;
if (!amd_iommu_irq_remap)
return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
- set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+ for_each_pci_segment(pci_seg) {
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
+ __set_dev_entry_bit(pci_seg->dev_table,
+ devid, DEV_ENTRY_IRQ_TBL_EN);
+ }
}
static void iommu_init_flags(struct amd_iommu *iommu)
@@ -2440,8 +2661,6 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
- iommu_feature_enable(iommu, CONTROL_GAM_EN);
- fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
@@ -2478,7 +2697,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
static void early_enable_iommus(void)
{
struct amd_iommu *iommu;
-
+ struct amd_iommu_pci_seg *pci_seg;
if (!copy_device_table()) {
/*
@@ -2488,9 +2707,14 @@ static void early_enable_iommus(void)
*/
if (amd_iommu_pre_enabled)
pr_err("Failed to copy DEV table from previous kernel.\n");
- if (old_dev_tbl_cpy != NULL)
- free_pages((unsigned long)old_dev_tbl_cpy,
- get_order(dev_table_size));
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->old_dev_tbl_cpy != NULL) {
+ free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = NULL;
+ }
+ }
for_each_iommu(iommu) {
clear_translation_pre_enabled(iommu);
@@ -2498,9 +2722,13 @@ static void early_enable_iommus(void)
}
} else {
pr_info("Copied DEV table from previous kernel.\n");
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = old_dev_tbl_cpy;
+
+ for_each_pci_segment(pci_seg) {
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
+ }
+
for_each_iommu(iommu) {
iommu_disable_command_buffer(iommu);
iommu_disable_event_buffer(iommu);
@@ -2512,19 +2740,6 @@ static void early_enable_iommus(void)
iommu_flush_all_caches(iommu);
}
}
-
-#ifdef CONFIG_IRQ_REMAP
- /*
- * Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
-#endif
}
static void enable_iommus_v2(void)
@@ -2537,10 +2752,72 @@ static void enable_iommus_v2(void)
}
}
+static void enable_iommus_vapic(void)
+{
+#ifdef CONFIG_IRQ_REMAP
+ u32 status, i;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ /*
+ * Disable GALog if already running. It could have been enabled
+ * in the previous boot before kdump.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ continue;
+
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+ /*
+		 * Need to poll the GALOGRun bit until it reads zero before
+		 * we can safely set/modify the GA Log registers.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (WARN_ON(i >= LOOP_TIMEOUT))
+ return;
+ }
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ if (amd_iommu_snp_en &&
+ !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
+ pr_warn("Force to disable Virtual APIC due to SNP\n");
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ /* Enabling GAM and SNPAVIC support */
+ for_each_iommu(iommu) {
+ if (iommu_init_ga_log(iommu) ||
+ iommu_ga_log_enable(iommu))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GAM_EN);
+ if (amd_iommu_snp_en)
+ iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
+ }
+
+ amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+ pr_info("Virtual APIC enabled\n");
+#endif
+}
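The kdump path above must see GALogRun clear before it touches the GA Log registers, bounding the wait to LOOP_TIMEOUT iterations of udelay(10). A generic standalone sketch of that bounded-poll pattern, with the status register faked:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define LOOP_TIMEOUT	2000000
	#define GALOG_RUN_MASK	(1u << 8)	/* illustrative bit position */

	static uint32_t fake_status = GALOG_RUN_MASK;	/* pretend GALog is running */

	static uint32_t read_status(void)	/* stands in for readl(mmio + STATUS) */
	{
		uint32_t s = fake_status;

		fake_status = 0;	/* hardware stops after one poll in this toy */
		return s;
	}

	/* Returns true once the run bit clears, false if the bounded wait expires. */
	static bool wait_galog_stopped(void)
	{
		for (uint32_t i = 0; i < LOOP_TIMEOUT; i++) {
			if (!(read_status() & GALOG_RUN_MASK))
				return true;
			/* udelay(10) would sit here in the kernel */
		}
		return false;
	}

	int main(void)
	{
		assert(wait_galog_stopped());
		return 0;
	}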
+
static void enable_iommus(void)
{
early_enable_iommus();
-
+ enable_iommus_vapic();
enable_iommus_v2();
}
@@ -2590,27 +2867,11 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_iommu_resources(void)
{
- kmemleak_free(irq_lookup_table);
- free_pages((unsigned long)irq_lookup_table,
- get_order(rlookup_table_size));
- irq_lookup_table = NULL;
-
kmem_cache_destroy(amd_iommu_irq_cache);
amd_iommu_irq_cache = NULL;
- free_pages((unsigned long)amd_iommu_rlookup_table,
- get_order(rlookup_table_size));
- amd_iommu_rlookup_table = NULL;
-
- free_pages((unsigned long)amd_iommu_alias_table,
- get_order(alias_table_size));
- amd_iommu_alias_table = NULL;
-
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = NULL;
-
free_iommu_all();
+ free_pci_segments();
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2709,7 +2970,7 @@ static void __init ivinfo_init(void *ivrs)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int i, remap_cache_sz, ret;
+ int remap_cache_sz, ret;
acpi_status status;
if (!amd_iommu_detected)
@@ -2737,42 +2998,8 @@ static int __init early_amd_iommu_init(void)
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
- /*
- * First parse ACPI tables to find the largest Bus/Dev/Func
- * we need to handle. Upon this information the shared data
- * structures for the IOMMUs in the system will be allocated
- */
- ret = find_last_devid_acpi(ivrs_base);
- if (ret)
- goto out;
-
- dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
- alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
- rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
-
/* Device table - directly used by all IOMMUs */
ret = -ENOMEM;
- amd_iommu_dev_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
- get_order(dev_table_size));
- if (amd_iommu_dev_table == NULL)
- goto out;
-
- /*
- * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
- * IOMMU see for that device
- */
- amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
- get_order(alias_table_size));
- if (amd_iommu_alias_table == NULL)
- goto out;
-
- /* IOMMU rlookup table - find the IOMMU for a specific device */
- amd_iommu_rlookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- if (amd_iommu_rlookup_table == NULL)
- goto out;
amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
@@ -2781,12 +3008,6 @@ static int __init early_amd_iommu_init(void)
goto out;
/*
- * let all alias entries point to itself
- */
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- amd_iommu_alias_table[i] = i;
-
- /*
* never allocate domain 0 because its used as the non-allocated and
* error value placeholder
*/
@@ -2808,6 +3029,7 @@ static int __init early_amd_iommu_init(void)
amd_iommu_irq_remap = check_ioapic_information();
if (amd_iommu_irq_remap) {
+ struct amd_iommu_pci_seg *pci_seg;
/*
* Interrupt remapping enabled, create kmem_cache for the
* remapping tables.
@@ -2824,13 +3046,10 @@ static int __init early_amd_iommu_init(void)
if (!amd_iommu_irq_cache)
goto out;
- irq_lookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- kmemleak_alloc(irq_lookup_table, rlookup_table_size,
- 1, GFP_KERNEL);
- if (!irq_lookup_table)
- goto out;
+ for_each_pci_segment(pci_seg) {
+ if (alloc_irq_lookup_table(pci_seg))
+ goto out;
+ }
}
ret = init_memory_definitions(ivrs_base);
@@ -2937,6 +3156,7 @@ static int __init state_next(void)
register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+ enable_iommus_vapic();
enable_iommus_v2();
break;
case IOMMU_PCI_INIT:
@@ -2967,8 +3187,11 @@ static int __init state_next(void)
free_iommu_resources();
} else {
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg)
+ uninit_device_table_dma(pci_seg);
- uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
@@ -3161,15 +3384,17 @@ static int __init parse_amd_iommu_options(char *str)
static int __init parse_ivrs_ioapic(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+ }
}
if (early_ioapic_map_size == EARLY_MAP_SIZE) {
@@ -3178,7 +3403,7 @@ static int __init parse_ivrs_ioapic(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_ioapic_map_size++;
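
Note on the hunk above: the option now accepts an optional PCI segment, so both ivrs_ioapic=[32]=00:14.0 (segment 0) and ivrs_ioapic=[32]=0001:00:14.0 parse. IVRS_GET_SBDF_ID() is defined elsewhere in this series; the sketch below shows the packing it is assumed to perform, so its exact form here is an assumption:

/* Assumed definition (lives in amd_iommu_types.h in this series):
 * bits 31:16 = PCI segment, 15:8 = bus, 7:3 = device, 2:0 = function. */
#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) \
	((((seg) & 0xffff) << 16) | (((bus) & 0xff) << 8) | \
	 (((dev) & 0x1f) << 3) | ((fn) & 0x7))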
@@ -3191,15 +3416,17 @@ static int __init parse_ivrs_ioapic(char *str)
static int __init parse_ivrs_hpet(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_hpet%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+ }
}
if (early_hpet_map_size == EARLY_MAP_SIZE) {
@@ -3208,7 +3435,7 @@ static int __init parse_ivrs_hpet(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_hpet_map_size++;
@@ -3221,15 +3448,18 @@ static int __init parse_ivrs_hpet(char *str)
static int __init parse_ivrs_acpihid(char *str)
{
- u32 bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p;
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
int ret, i;
ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
if (ret != 4) {
- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
- return 1;
+ ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
+ return 1;
+ }
}
p = acpiid;
@@ -3244,8 +3474,7 @@ static int __init parse_ivrs_acpihid(char *str)
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
- early_acpihid_map[i].devid =
- ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
early_acpihid_map[i].cmd_line = true;
return 1;
@@ -3260,7 +3489,12 @@ __setup("ivrs_acpihid", parse_ivrs_acpihid);
bool amd_iommu_v2_supported(void)
{
- return amd_iommu_v2_present;
+ /*
+	 * Since DTE[Mode]=0 is prohibited on SNP-enabled systems
+	 * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used
+	 * without first setting up an IOMMUv1 page table.
+ */
+ return amd_iommu_v2_present && !amd_iommu_snp_en;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
@@ -3363,3 +3597,41 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void)
+{
+ /*
+	 * SNP support requires that the IOMMU be enabled and not
+	 * configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
+ return -EINVAL;
+ }
+
+ /*
+ * Prevent enabling SNP after IOMMU_ENABLED state because this process
+	 * affects how the IOMMU driver sets up data structures and
+	 * configures the IOMMU hardware.
+ */
+ if (init_state > IOMMU_ENABLED) {
+ pr_err("SNP: Too late to enable SNP for IOMMU.\n");
+ return -EINVAL;
+ }
+
+ amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+ if (!amd_iommu_snp_en)
+ return -EINVAL;
+
+ pr_info("SNP enabled\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+
+ return 0;
+}
+#endif
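
For orientation, a hedged sketch of a call site: the expected caller is the SEV-SNP host setup path in arch/x86, which must run before the init state machine moves past IOMMU_ENABLED. The function name below is hypothetical and the real SNP host init code may differ:

/* Hypothetical caller sketch -- not part of this patch. */
static int __init snp_host_iommu_setup(void)
{
	int ret = amd_iommu_snp_enable();

	if (ret)
		pr_err("SNP: cannot enable IOMMU support (%d)\n", ret);
	return ret;
}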
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6608d1717574..7d4b61e5db47 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -258,7 +258,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
/* pte could have been changed somewhere. */
- if (cmpxchg64(pte, __pte, __npte) != __pte)
+ if (!try_cmpxchg64(pte, &__pte, __npte))
free_page((unsigned long)page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -341,10 +341,8 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
u64 *pt;
int mode;
- while (cmpxchg64(pte, pteval, 0) != pteval) {
+ while (!try_cmpxchg64(pte, &pteval, 0))
pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
- pteval = *pte;
- }
if (!IOMMU_PTE_PRESENT(pteval))
return;
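
Context for the two conversions above: try_cmpxchg64() returns a bool and, on failure, writes the value it actually found back through the expected-value pointer, which is why the explicit pteval = *pte; reload can be dropped from the loop. A minimal standalone sketch of those semantics using C11 atomics (the kernel helper itself is arch-optimized):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Compare *ptr with *expected; if equal, store desired and return true.
 * Otherwise copy the current value of *ptr into *expected and return
 * false -- exactly the behaviour the rewritten loops rely on. */
static bool try_cmpxchg64_sketch(_Atomic uint64_t *ptr,
				 uint64_t *expected, uint64_t desired)
{
	return atomic_compare_exchange_strong(ptr, expected, desired);
}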
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 840831d5d2ad..828672a46a3d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -62,9 +62,6 @@
static DEFINE_SPINLOCK(pd_bitmap_lock);
-/* List of all available dev_data structures */
-static LLIST_HEAD(dev_data_list);
-
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
@@ -95,13 +92,6 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
-static inline u16 get_pci_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return pci_dev_id(pdev);
-}
-
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -122,16 +112,74 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
}
-static inline int get_device_id(struct device *dev)
+static inline int get_device_sbdf_id(struct device *dev)
{
- int devid;
+ int sbdf;
if (dev_is_pci(dev))
- devid = get_pci_device_id(dev);
+ sbdf = get_pci_sbdf_id(to_pci_dev(dev));
else
- devid = get_acpihid_device_id(dev, NULL);
+ sbdf = get_acpihid_device_id(dev, NULL);
+
+ return sbdf;
+}
+
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ BUG_ON(pci_seg == NULL);
+ dev_table = pci_seg->dev_table;
+ BUG_ON(dev_table == NULL);
+
+ return dev_table;
+}
+
+static inline u16 get_device_segment(struct device *dev)
+{
+ u16 seg;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ seg = pci_domain_nr(pdev->bus);
+ } else {
+ u32 devid = get_acpihid_device_id(dev, NULL);
+
+ seg = PCI_SBDF_TO_SEGID(devid);
+ }
+
+ return seg;
+}
+
+/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->rlookup_table[devid] = iommu;
+}
+
+static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == seg)
+ return pci_seg->rlookup_table[devid];
+ }
+ return NULL;
+}
- return devid;
+static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
+{
+ u16 seg = get_device_segment(dev);
+ int devid = get_device_sbdf_id(dev);
+
+ if (devid < 0)
+ return NULL;
+ return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
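
Taken together, the helpers above replace the old global amd_iommu_rlookup_table[] with a device → segment → IOMMU → per-segment table walk. A hypothetical helper (not in the patch) showing the flow, built only from functions visible in this hunk:

/* Hypothetical: resolve a device to its per-segment device-table entry. */
static struct dev_table_entry *lookup_dte(struct device *dev)
{
	struct amd_iommu *iommu = rlookup_amd_iommu(dev);
	int sbdf = get_device_sbdf_id(dev);

	if (!iommu || sbdf < 0)
		return NULL;
	return &get_dev_table(iommu)[PCI_SBDF_TO_DEVID(sbdf)];
}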
@@ -139,9 +187,10 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct iommu_dev_data *alloc_dev_data(u16 devid)
+static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
@@ -151,19 +200,20 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
- llist_add(&dev_data->dev_data_list, &dev_data_list);
+ llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(u16 devid)
+static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (llist_empty(&dev_data_list))
+ if (llist_empty(&pci_seg->dev_data_list))
return NULL;
- node = dev_data_list.first;
+ node = pci_seg->dev_data_list.first;
llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
return dev_data;
@@ -174,67 +224,74 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct amd_iommu *iommu;
+ struct dev_table_entry *dev_table;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
return 0;
- amd_iommu_rlookup_table[alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[alias].data));
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+ dev_table = get_dev_table(iommu);
+ memcpy(dev_table[alias].data,
+ dev_table[devid].data,
+ sizeof(dev_table[alias].data));
return 0;
}
-static void clone_aliases(struct pci_dev *pdev)
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
- if (!pdev)
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
return;
+ pdev = to_pci_dev(dev);
/*
* The IVRS alias stored in the alias table may not be
	 * part of the PCI DMA aliases if its bus differs
* from the original device.
*/
- clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+ clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
pci_for_each_dma_alias(pdev, clone_alias, NULL);
}
-static struct pci_dev *setup_aliases(struct device *dev)
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u16 ivrs_alias;
/* For ACPI HID devices, there are no aliases */
if (!dev_is_pci(dev))
- return NULL;
+ return;
/*
* Add the IVRS alias to the pci aliases if it is on the same
* bus. The IVRS table may know about a quirk that we don't.
*/
- ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
- clone_aliases(pdev);
-
- return pdev;
+ clone_aliases(iommu, dev);
}
-static struct iommu_dev_data *find_dev_data(u16 devid)
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- dev_data = search_dev_data(devid);
+ dev_data = search_dev_data(iommu, devid);
if (dev_data == NULL) {
- dev_data = alloc_dev_data(devid);
+ dev_data = alloc_dev_data(iommu, devid);
if (!dev_data)
return NULL;
@@ -296,42 +353,49 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
*/
static bool check_device(struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu;
+ int devid, sbdf;
if (!dev)
return false;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return false;
+ devid = PCI_SBDF_TO_DEVID(sbdf);
- /* Out of our scope? */
- if (devid > amd_iommu_last_bdf)
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return false;
- if (amd_iommu_rlookup_table[devid] == NULL)
+ /* Out of our scope? */
+ pci_seg = iommu->pci_seg;
+ if (devid > pci_seg->last_bdf)
return false;
return true;
}
-static int iommu_init_device(struct device *dev)
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
struct iommu_dev_data *dev_data;
- int devid;
+ int devid, sbdf;
if (dev_iommu_priv_get(dev))
return 0;
- devid = get_device_id(dev);
- if (devid < 0)
- return devid;
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return sbdf;
- dev_data = find_dev_data(devid);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ dev_data = find_dev_data(iommu, devid);
if (!dev_data)
return -ENOMEM;
- dev_data->pdev = setup_aliases(dev);
+ dev_data->dev = dev;
+ setup_aliases(iommu, dev);
/*
	 * By default we use passthrough mode for IOMMUv2-capable devices.
@@ -341,9 +405,6 @@ static int iommu_init_device(struct device *dev)
*/
if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
- struct amd_iommu *iommu;
-
- iommu = amd_iommu_rlookup_table[dev_data->devid];
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
@@ -352,18 +413,21 @@ static int iommu_init_device(struct device *dev)
return 0;
}
-static void iommu_ignore_device(struct device *dev)
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return;
- amd_iommu_rlookup_table[devid] = NULL;
- memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ pci_seg->rlookup_table[devid] = NULL;
+ memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
- setup_aliases(dev);
+ setup_aliases(iommu, dev);
}
static void amd_iommu_uninit_device(struct device *dev)
@@ -391,13 +455,13 @@ static void amd_iommu_uninit_device(struct device *dev)
*
****************************************************************************/
-static void dump_dte_entry(u16 devid)
+static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i,
- amd_iommu_dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -409,7 +473,7 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
-static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, vmg_tag, flags;
@@ -421,7 +485,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -432,8 +496,8 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
vmg_tag, spa, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, spa, flags);
}
@@ -441,7 +505,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
pci_dev_put(pdev);
}
-static void amd_iommu_report_rmp_fault(volatile u32 *event)
+static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, flags_rmp, vmg_tag, flags;
@@ -454,7 +518,7 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
gpa = ((u64)event[3] << 32) | event[2];
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -465,8 +529,8 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
vmg_tag, gpa, flags_rmp, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, gpa, flags_rmp, flags);
}
@@ -480,13 +544,14 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
#define IS_WRITE_REQUEST(flags) \
((flags) & EVENT_FLAG_RW)
-static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
+static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ u16 devid, u16 domain_id,
u64 address, int flags)
{
struct iommu_dev_data *dev_data = NULL;
struct pci_dev *pdev;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -511,8 +576,8 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
domain_id, address, flags);
}
} else {
- pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domain_id, address, flags);
}
@@ -549,26 +614,26 @@ retry:
}
if (type == EVENT_TYPE_IO_FAULT) {
- amd_iommu_report_page_fault(devid, pasid, address, flags);
+ amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
return;
}
switch (type) {
case EVENT_TYPE_ILL_DEV:
- dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
- dump_dte_entry(devid);
+ dump_dte_entry(iommu, devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
- dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
"address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
@@ -580,26 +645,26 @@ retry:
address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_RMP_FAULT:
- amd_iommu_report_rmp_fault(event);
+ amd_iommu_report_rmp_fault(iommu, event);
break;
case EVENT_TYPE_RMP_HW_ERR:
- amd_iommu_report_rmp_hw_error(event);
+ amd_iommu_report_rmp_hw_error(iommu, event);
break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
- dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags, tag);
break;
default:
@@ -636,7 +701,7 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
fault.address = raw[1];
fault.pasid = PPR_PASID(raw[0]);
- fault.device_id = PPR_DEVID(raw[0]);
+ fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
@@ -874,7 +939,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = data;
+ cmd->data[2] = lower_32_bits(data);
+ cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
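
The hunk above widens the completion-wait payload from 32 to 64 bits by spreading data across command words 2 and 3. A standalone sketch verifying the split is lossless:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t data = 0x1122334455667788ULL;
	uint32_t cmd2 = (uint32_t)data;		/* lower_32_bits() */
	uint32_t cmd3 = (uint32_t)(data >> 32);	/* upper_32_bits() */

	/* Reassembling the halves recovers the original marker. */
	assert((((uint64_t)cmd3 << 32) | cmd2) == data);
	return 0;
}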
@@ -1125,8 +1191,9 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= 0xffff; ++devid)
+ for (devid = 0; devid <= last_bdf; ++devid)
iommu_flush_dte(iommu, devid);
iommu_completion_wait(iommu);
@@ -1139,8 +1206,9 @@ static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
u32 dom_id;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+ for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1);
@@ -1183,8 +1251,9 @@ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+ for (devid = 0; devid <= last_bdf; devid++)
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -1212,7 +1281,9 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
int qdep;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
@@ -1232,20 +1303,28 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
struct amd_iommu *iommu;
+ struct pci_dev *pdev = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
- if (dev_data->pdev)
- ret = pci_for_each_dma_alias(dev_data->pdev,
+ if (dev_is_pci(dev_data->dev))
+ pdev = to_pci_dev(dev_data->dev);
+
+ if (pdev)
+ ret = pci_for_each_dma_alias(pdev,
device_flush_dte_alias, iommu);
else
ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
- alias = amd_iommu_alias_table[dev_data->devid];
+ pci_seg = iommu->pci_seg;
+ alias = pci_seg->alias_table[dev_data->devid];
if (alias != dev_data->devid) {
ret = iommu_flush_dte(iommu, alias);
if (ret)
@@ -1461,28 +1540,35 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
- bool ats, bool ppr)
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
- flags = amd_iommu_dev_table[devid].data[1];
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+
+ /*
+	 * When SNP is enabled, only set the TV bit when IOMMU
+ * page translation is in use.
+ */
+ if (!amd_iommu_snp_en || (domain->id != 0))
+ pte_root |= DTE_FLAG_TV;
+
+ flags = dev_table[devid].data[1];
if (ats)
flags |= DTE_FLAG_IOTLB;
if (ppr) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (iommu_feature(iommu, FEATURE_EPHSUP))
pte_root |= 1ULL << DEV_ENTRY_PPR;
}
@@ -1516,9 +1602,9 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
- old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
- amd_iommu_dev_table[devid].data[1] = flags;
- amd_iommu_dev_table[devid].data[0] = pte_root;
+ old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
+ dev_table[devid].data[1] = flags;
+ dev_table[devid].data[0] = pte_root;
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1526,19 +1612,23 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
* entries for the old domain ID that is being overwritten
*/
if (old_domid) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}
-static void clear_dte_entry(u16 devid)
+static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
/* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
- amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+ dev_table[devid].data[0] = DTE_FLAG_V;
+
+ if (!amd_iommu_snp_en)
+ dev_table[devid].data[0] |= DTE_FLAG_TV;
+
+ dev_table[devid].data[1] &= DTE_FLAG_MASK;
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
}
static void do_attach(struct iommu_dev_data *dev_data,
@@ -1547,7 +1637,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct amd_iommu *iommu;
bool ats;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1559,9 +1651,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
}
@@ -1571,13 +1663,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
- clear_dte_entry(dev_data->devid);
- clone_aliases(dev_data->pdev);
+ clear_dte_entry(iommu, dev_data->devid);
+ clone_aliases(iommu, dev_data->dev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -1749,23 +1843,24 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
- int ret, devid;
+ int ret;
if (!check_device(dev))
return ERR_PTR(-ENODEV);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
- ret = iommu_init_device(dev);
+ ret = iommu_init_device(iommu, dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
- iommu_ignore_device(dev);
+ iommu_ignore_device(iommu, dev);
} else {
amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
@@ -1785,13 +1880,14 @@ static void amd_iommu_probe_finalize(struct device *dev)
static void amd_iommu_release_device(struct device *dev)
{
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
@@ -1816,9 +1912,13 @@ static void update_device_table(struct protection_domain *domain)
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain,
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ if (!iommu)
+ continue;
+ set_dte_entry(iommu, dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
}
}
@@ -1969,6 +2069,13 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
+ /*
+	 * Since DTE[Mode]=0 is prohibited on SNP-enabled systems,
+	 * default to using IOMMU_DOMAIN_DMA[_FQ].
+ */
+ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
+ return NULL;
+
domain = protection_domain_alloc(type);
if (!domain)
return NULL;
@@ -2004,7 +2111,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
@@ -2013,7 +2119,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (dev_data->domain != NULL)
detach_device(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
@@ -2040,7 +2146,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return -EINVAL;
@@ -2169,13 +2275,21 @@ static void amd_iommu_get_resv_regions(struct device *dev,
{
struct iommu_resv_region *region;
struct unity_map_entry *entry;
- int devid;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return;
+ pci_seg = iommu->pci_seg;
- list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ list_for_each_entry(entry, &pci_seg->unity_map, list) {
int type, prot = 0;
size_t length;
@@ -2280,7 +2394,6 @@ const struct iommu_ops amd_iommu_ops = {
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
@@ -2419,8 +2532,9 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
continue;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
-
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
qdep, address, size);
@@ -2582,7 +2696,9 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
struct iommu_cmd cmd;
dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
@@ -2644,30 +2760,35 @@ EXPORT_SYMBOL(amd_iommu_device_info);
static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
-static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
{
u64 dte;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
- dte = amd_iommu_dev_table[devid].data[2];
+ dte = dev_table[devid].data[2];
dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
dte |= iommu_virt_to_phys(table->table);
dte |= DTE_IRQ_REMAP_INTCTL;
dte |= DTE_INTTABLEN;
dte |= DTE_IRQ_REMAP_ENABLE;
- amd_iommu_dev_table[devid].data[2] = dte;
+ dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid)
+static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
{
struct irq_remap_table *table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
- "%s: no iommu for devid %x\n", __func__, devid))
+ if (WARN_ONCE(!pci_seg->rlookup_table[devid],
+ "%s: no iommu for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
- table = irq_lookup_table[devid];
- if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ table = pci_seg->irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
return table;
@@ -2700,8 +2821,10 @@ static struct irq_remap_table *__alloc_irq_table(void)
static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->irq_lookup_table[devid] = table;
+ set_dte_irq_entry(iommu, devid, table);
iommu_flush_dte(iommu, devid);
}
@@ -2709,35 +2832,38 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
void *data)
{
struct irq_remap_table *table = data;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
+ if (!iommu)
+ return -EINVAL;
- iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+ pci_seg = iommu->pci_seg;
+ pci_seg->irq_lookup_table[alias] = table;
+ set_dte_irq_entry(iommu, alias, table);
+ iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
return 0;
}
-static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
+static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
+ u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
- struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- goto out_unlock;
-
- table = irq_lookup_table[devid];
+ pci_seg = iommu->pci_seg;
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- alias = amd_iommu_alias_table[devid];
- table = irq_lookup_table[alias];
+ alias = pci_seg->alias_table[devid];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2751,11 +2877,11 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
spin_lock_irqsave(&iommu_table_lock, flags);
- table = irq_lookup_table[devid];
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- table = irq_lookup_table[alias];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2786,18 +2912,14 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align,
- struct pci_dev *pdev)
+static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
+ bool align, struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- return -ENODEV;
-
- table = alloc_irq_table(devid, pdev);
+ table = alloc_irq_table(iommu, devid, pdev);
if (!table)
return -ENODEV;
@@ -2836,20 +2958,15 @@ out:
return index;
}
-static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
- struct amd_ir_data *data)
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte, struct amd_ir_data *data)
{
bool ret;
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
struct irte_ga *entry;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2880,17 +2997,13 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
return 0;
}
-static int modify_irte(u16 devid, int index, union irte *irte)
+static int modify_irte(struct amd_iommu *iommu,
+ u16 devid, int index, union irte *irte)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2904,17 +3017,12 @@ static int modify_irte(u16 devid, int index, union irte *irte)
return 0;
}
-static void free_irte(u16 devid, int index)
+static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return;
@@ -2956,49 +3064,49 @@ static void irte_ga_prepare(void *entry,
irte->lo.fields_remap.valid = 1;
}
-static void irte_activate(void *entry, u16 devid, u16 index)
+static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 1;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_activate(void *entry, u16 devid, u16 index)
+static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 1;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_deactivate(void *entry, u16 devid, u16 index)
+static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 0;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 0;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
union irte *irte = (union irte *) entry;
irte->fields.vector = vector;
irte->fields.destination = dest_apicid;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
@@ -3009,7 +3117,7 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
APICID_TO_IRTE_DEST_LO(dest_apicid);
irte->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(dest_apicid);
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
}
@@ -3068,7 +3176,7 @@ static int get_devid(struct irq_alloc_info *info)
return get_hpet_devid(info->devid);
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
- return get_device_id(msi_desc_to_dev(info->desc));
+ return get_device_sbdf_id(msi_desc_to_dev(info->desc));
default:
WARN_ON_ONCE(1);
return -1;
@@ -3097,7 +3205,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
int devid, int index, int sub_handle)
{
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ struct amd_iommu *iommu = data->iommu;
if (!iommu)
return;
@@ -3148,8 +3256,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
struct amd_ir_data *data = NULL;
+ struct amd_iommu *iommu;
struct irq_cfg *cfg;
- int i, ret, devid;
+ int i, ret, devid, seg, sbdf;
int index;
if (!info)
@@ -3165,8 +3274,14 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
- devid = get_devid(info);
- if (devid < 0)
+ sbdf = get_devid(info);
+ if (sbdf < 0)
+ return -EINVAL;
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = __rlookup_amd_iommu(seg, devid);
+ if (!iommu)
return -EINVAL;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
@@ -3175,9 +3290,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
struct irq_remap_table *table;
- struct amd_iommu *iommu;
- table = alloc_irq_table(devid, NULL);
+ table = alloc_irq_table(iommu, devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -3185,7 +3299,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
* interrupts.
*/
table->min_index = 32;
- iommu = amd_iommu_rlookup_table[devid];
for (i = 0; i < 32; ++i)
iommu->irte_ops->set_allocated(table, i);
}
@@ -3198,10 +3311,10 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
- index = alloc_irq_index(devid, nr_irqs, align,
+ index = alloc_irq_index(iommu, devid, nr_irqs, align,
msi_desc_to_pci_dev(info->desc));
} else {
- index = alloc_irq_index(devid, nr_irqs, false, NULL);
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
}
if (index < 0) {
@@ -3233,6 +3346,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
goto out_free_data;
}
+ data->iommu = iommu;
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
@@ -3249,7 +3363,7 @@ out_free_data:
kfree(irq_data->chip_data);
}
for (i = 0; i < nr_irqs; i++)
- free_irte(devid, index + i);
+ free_irte(iommu, devid, index + i);
out_free_parent:
irq_domain_free_irqs_common(domain, virq, nr_irqs);
return ret;
@@ -3268,7 +3382,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
if (irq_data && irq_data->chip_data) {
data = irq_data->chip_data;
irte_info = &data->irq_2_irte;
- free_irte(irte_info->devid, irte_info->index);
+ free_irte(data->iommu, irte_info->devid, irte_info->index);
kfree(data->entry);
kfree(data);
}
@@ -3286,13 +3400,13 @@ static int irq_remapping_activate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
struct irq_cfg *cfg = irqd_cfg(irq_data);
if (!iommu)
return 0;
- iommu->irte_ops->activate(data->entry, irte_info->devid,
+ iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
irte_info->index);
amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
return 0;
@@ -3303,10 +3417,10 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
if (iommu)
- iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+ iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
irte_info->index);
}
@@ -3326,8 +3440,8 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
if (devid < 0)
return 0;
+ iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
- iommu = amd_iommu_rlookup_table[devid];
return iommu && iommu->ir_domain == d;
}
@@ -3361,7 +3475,7 @@ int amd_iommu_activate_guest_mode(void *data)
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
@@ -3391,7 +3505,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
@@ -3399,12 +3513,16 @@ EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
int ret;
- struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
- struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+ struct iommu_dev_data *dev_data;
+
+ if (ir_data->iommu == NULL)
+ return -EINVAL;
+
+ dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
/* Note:
* This device has never been set up for guest mode.
@@ -3426,10 +3544,6 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
pi_data->is_guest_mode = false;
}
- iommu = amd_iommu_rlookup_table[irte_info->devid];
- if (iommu == NULL)
- return -EINVAL;
-
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
ir_data->ga_root_ptr = (pi_data->base >> 12);
@@ -3463,7 +3577,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
- iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
irte_info->index, cfg->vector,
cfg->dest_apicid);
}
@@ -3475,7 +3589,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = ir_data->iommu;
int ret;
if (!iommu)
@@ -3545,11 +3659,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
!ref || !entry || !entry->lo.fields_vapic.guest_mode)
return 0;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = ir_data->iommu;
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENODEV;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index afb3efd565b7..6a1f02c62dff 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -51,7 +51,7 @@ struct pasid_state {
struct device_state {
struct list_head list;
- u16 devid;
+ u32 sbdf;
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
@@ -83,35 +83,25 @@ static struct workqueue_struct *iommu_wq;
static void free_pasid_states(struct device_state *dev_state);
-static u16 device_id(struct pci_dev *pdev)
-{
- u16 devid;
-
- devid = pdev->bus->number;
- devid = (devid << 8) | pdev->devfn;
-
- return devid;
-}
-
-static struct device_state *__get_device_state(u16 devid)
+static struct device_state *__get_device_state(u32 sbdf)
{
struct device_state *dev_state;
list_for_each_entry(dev_state, &state_list, list) {
- if (dev_state->devid == devid)
+ if (dev_state->sbdf == sbdf)
return dev_state;
}
return NULL;
}
-static struct device_state *get_device_state(u16 devid)
+static struct device_state *get_device_state(u32 sbdf)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
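
The v2 driver now keys device_state on a 32-bit SBDF rather than the old 16-bit devid. get_pci_sbdf_id() is introduced elsewhere in the series; under the same bit layout assumed earlier, it presumably reduces to:

/* Assumed equivalent of get_pci_sbdf_id(): PCI segment in the high
 * 16 bits, bus/devfn in the low 16. */
static inline u32 get_pci_sbdf_id_sketch(struct pci_dev *pdev)
{
	return ((u32)pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
}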
@@ -528,15 +518,16 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
unsigned long flags;
struct fault *fault;
bool finish;
- u16 tag, devid;
+ u16 tag, devid, seg_id;
int ret;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
finish = (iommu_fault->tag >> 9) & 1;
- devid = iommu_fault->device_id;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
+ devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
+ pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
devid & 0xff);
if (!pdev)
return -ENODEV;
@@ -550,7 +541,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
goto out;
}
- dev_state = get_device_state(iommu_fault->device_id);
+ dev_state = get_device_state(iommu_fault->sbdf);
if (dev_state == NULL)
goto out;
@@ -609,7 +600,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct pasid_state *pasid_state;
struct device_state *dev_state;
struct mm_struct *mm;
- u16 devid;
+ u32 sbdf;
int ret;
might_sleep();
@@ -617,8 +608,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return -EINVAL;
@@ -692,15 +683,15 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
- u16 devid;
+ u32 sbdf;
might_sleep();
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return;
@@ -742,7 +733,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
struct iommu_group *group;
unsigned long flags;
int ret, tmp;
- u16 devid;
+ u32 sbdf;
might_sleep();
@@ -759,7 +750,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (pasids <= 0 || pasids > (PASID_MASK + 1))
return -EINVAL;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
if (dev_state == NULL)
@@ -768,7 +759,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
dev_state->pdev = pdev;
- dev_state->devid = devid;
+ dev_state->sbdf = sbdf;
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -786,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (dev_state->domain == NULL)
goto out_free_states;
+ /* See iommu_is_default_domain() */
+ dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
amd_iommu_domain_direct_map(dev_state->domain);
ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
@@ -806,7 +799,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_irqsave(&state_lock, flags);
- if (__get_device_state(devid) != NULL) {
+ if (__get_device_state(sbdf) != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
@@ -838,16 +831,16 @@ void amd_iommu_free_device(struct pci_dev *pdev)
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
@@ -867,18 +860,18 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
@@ -898,18 +891,18 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c
index 5120ce4fdce3..79dbb8f33b47 100644
--- a/drivers/iommu/amd/quirks.c
+++ b/drivers/iommu/amd/quirks.c
@@ -15,7 +15,7 @@
struct ivrs_quirk_entry {
u8 id;
- u16 devid;
+ u32 devid;
};
enum {
@@ -49,7 +49,7 @@ static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
const struct ivrs_quirk_entry *i;
for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
- add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0);
return 0;
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 8af0242a90d9..1b1725759262 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -564,9 +564,6 @@ static void apple_dart_release_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (!cfg)
- return;
-
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -771,7 +768,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.of_xlate = apple_dart_of_xlate,
.def_domain_type = apple_dart_def_domain_type,
.get_resv_regions = apple_dart_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 88817a3376ef..d32b02336411 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
{
unsigned int i;
+ u64 val = STRTAB_STE_0_V;
+
+ if (disable_bypass && !force)
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+ else
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- arm_smmu_write_strtab_ent(NULL, -1, strtab);
+ strtab[0] = cpu_to_le64(val);
+ strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+ strtab[2] = 0;
strtab += STRTAB_STE_DWORDS;
}
}
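A sketch of the 64-bit value the FIELD_PREP() calls above compose, under the assumption that STRTAB_STE_0 keeps V in bit 0 and CFG in bits 3:1 with ABORT=0 and BYPASS=4 (per the SMMUv3 stream table entry format; the driver's header is authoritative):

#include <stdint.h>

#define STE0_V          (1ULL << 0)   /* entry valid                 */
#define STE0_CFG_SHIFT  1             /* CFG field, bits 3:1         */
#define STE0_CFG_ABORT  0ULL          /* terminate incoming traffic  */
#define STE0_CFG_BYPASS 4ULL          /* traffic bypasses the SMMU   */

/* disable_bypass steers unmatched streams to abort unless the entry
 * is forced to bypass (the RMR case added later in this series). */
static uint64_t ste0_bypass_val(int disable_bypass, int force)
{
	uint64_t cfg = (disable_bypass && !force) ? STE0_CFG_ABORT
						  : STE0_CFG_BYPASS;
	return STE0_V | (cfg << STE0_CFG_SHIFT);
}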
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2537,6 +2546,19 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
+{
+ /* Check the SIDs are in range of the SMMU and our stream table */
+ if (!arm_smmu_sid_in_range(smmu, sid))
+ return -ERANGE;
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ return arm_smmu_init_l2_strtab(smmu, sid);
+
+ return 0;
+}
+
static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
struct arm_smmu_master *master)
{
@@ -2560,20 +2582,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
new_stream->id = sid;
new_stream->master = master;
- /*
- * Check the SIDs are in range of the SMMU and our stream table
- */
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
+ ret = arm_smmu_init_sid_strtab(smmu, sid);
+ if (ret)
break;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- break;
- }
/* Insert into SID tree */
new_node = &(smmu->streams.rb_node);
@@ -2691,20 +2702,14 @@ err_free_master:
static void arm_smmu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master *master;
-
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- master = dev_iommu_priv_get(dev);
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
kfree(master);
- iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
@@ -2760,58 +2765,27 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static bool arm_smmu_dev_has_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return arm_smmu_master_iopf_supported(master);
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_supported(master);
- default:
- return false;
- }
-}
-
-static bool arm_smmu_dev_feature_enabled(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return master->iopf_enabled;
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_enabled(master);
- default:
- return false;
- }
-}
-
static int arm_smmu_dev_enable_feature(struct device *dev,
enum iommu_dev_features feat)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_has_feature(dev, feat))
+ if (!master)
return -ENODEV;
- if (arm_smmu_dev_feature_enabled(dev, feat))
- return -EBUSY;
-
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!arm_smmu_master_iopf_supported(master))
+ return -EINVAL;
+ if (master->iopf_enabled)
+ return -EBUSY;
master->iopf_enabled = true;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_supported(master))
+ return -EINVAL;
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
return arm_smmu_master_enable_sva(master);
default:
return -EINVAL;
@@ -2823,16 +2797,20 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_feature_enabled(dev, feat))
+ if (!master)
return -EINVAL;
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!master->iopf_enabled)
+ return -EINVAL;
if (master->sva_enabled)
return -EBUSY;
master->iopf_enabled = false;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_enabled(master))
+ return -EINVAL;
return arm_smmu_master_disable_sva(master);
default:
return -EINVAL;
@@ -2847,9 +2825,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
- .dev_has_feat = arm_smmu_dev_has_feature,
- .dev_feat_enabled = arm_smmu_dev_feature_enabled,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.sva_bind = arm_smmu_sva_bind,
@@ -3049,7 +3024,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
return 0;
}
@@ -3743,6 +3718,36 @@ static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
return devm_ioremap_resource(dev, &res);
}
+static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ __le64 *step;
+ struct iommu_iort_rmr_data *rmr;
+ int ret, i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
+ if (ret) {
+ dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
+ rmr->sids[i]);
+ continue;
+ }
+
+ step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
+ arm_smmu_init_bypass_stes(step, 1, true);
+ }
+ }
+
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -3826,6 +3831,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
+ /* Check for RMRs and install bypass STEs if any */
+ arm_smmu_rmr_install_bypass_ste(smmu);
+
/* Reset the device */
ret = arm_smmu_device_reset(smmu, bypass);
if (ret)
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index b0cc01aa20c9..2a5a95e8e3f9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM_DEBUG) += arm-smmu-qcom-debug.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
new file mode 100644
index 000000000000..6eed8e67a0ca
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/ratelimit.h>
+
+#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
+
+enum qcom_smmu_impl_reg_offset {
+ QCOM_SMMU_TBU_PWR_STATUS,
+ QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
+ QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
+};
+
+struct qcom_smmu_config {
+ const u32 *reg_offset;
+};
+
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 tbu_pwr_status, sync_inv_ack, sync_inv_progress;
+ struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu);
+ const struct qcom_smmu_config *cfg;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rs)) {
+ dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
+
+ cfg = qsmmu->cfg;
+ if (!cfg)
+ return;
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS],
+ &tbu_pwr_status);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU power status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK],
+ &sync_inv_ack);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU sync/inv ack status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR],
+ &sync_inv_progress);
+ if (ret)
+ dev_err(smmu->dev,
+				"Failed to read TCU sync/inv progress: %d\n", ret);
+
+ dev_err(smmu->dev,
+ "TBU: power_status %#x sync_inv_ack %#x sync_inv_progress %#x\n",
+ tbu_pwr_status, sync_inv_ack, sync_inv_progress);
+ }
+}
+
+/* Implementation Defined Register Space 0 register offsets */
+static const u32 qcom_smmu_impl0_reg_offset[] = {
+ [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
+ [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
+ [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
+};
+
+static const struct qcom_smmu_config qcm2290_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7180_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7280_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8180x_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8280xp_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6125_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8150_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8250_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8450_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
+ { .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg },
+ { .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg },
+	{ .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg },
+ { .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg },
+ { .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg },
+ { .compatible = "qcom,sdm630-smmu-v2" },
+ { .compatible = "qcom,sdm845-smmu-500" },
+	{ .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg },
+	{ .compatible = "qcom,sm6350-smmu-500", .data = &sm6350_smmu_cfg },
+ { .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg },
+ { .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg },
+ { .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg },
+ { .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg },
+ { }
+};
+
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ const struct of_device_id *match;
+ const struct device_node *np = smmu->dev->of_node;
+
+ match = of_match_node(qcom_smmu_impl_debug_match, np);
+ if (!match)
+ return NULL;
+
+ return match->data;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 7820711c4560..b2708de25ea3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -5,23 +5,40 @@
#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
+#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
-struct qcom_smmu {
- struct arm_smmu_device smmu;
- bool bypass_quirk;
- u8 bypass_cbndx;
- u32 stall_enabled;
-};
+#define QCOM_DUMMY_VAL -1
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
}
+static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int spin_cnt, delay;
+ u32 reg;
+
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
+ }
+ udelay(delay);
+ }
+
+ qcom_smmu_tlb_sync_debug(smmu);
+}
+
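The sync loop above is the usual spin-then-back-off shape: poll hard for the fast case, then sleep with doubling delays before giving up and calling the debug dump. A generic, hedged rendition, where SPIN_COUNT and LOOP_TIMEOUT stand in for the driver's TLB_SPIN_COUNT and TLB_LOOP_TIMEOUT:

#include <stdint.h>
#include <unistd.h>

#define SPIN_COUNT   10       /* busy-poll iterations per round   */
#define LOOP_TIMEOUT 1000000  /* upper bound on the delay, in us  */

/* Returns 0 once (*reg & mask) clears, -1 on timeout. */
static int poll_until_clear(volatile uint32_t *reg, uint32_t mask)
{
	for (unsigned int delay = 1; delay < LOOP_TIMEOUT; delay *= 2) {
		for (int spin = SPIN_COUNT; spin > 0; spin--)
			if (!(*reg & mask))
				return 0;
		usleep(delay);        /* stand-in for udelay() */
	}
	return -1;                    /* caller escalates (debug dump) */
}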
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
u32 reg)
{
@@ -233,6 +250,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ }
@@ -374,6 +392,7 @@ static const struct arm_smmu_impl qcom_smmu_impl = {
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
@@ -382,6 +401,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
.reset = qcom_smmu500_reset,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
@@ -398,6 +418,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
+ qsmmu->cfg = qcom_smmu_impl_data(smmu);
return &qsmmu->smmu;
}
@@ -413,6 +434,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
{ .compatible = "qcom,sm6350-smmu-500" },
+ { .compatible = "qcom,sm6375-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
new file mode 100644
index 000000000000..99ec8f8629a0
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ARM_SMMU_QCOM_H
+#define _ARM_SMMU_QCOM_H
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+ const struct qcom_smmu_config *cfg;
+ bool bypass_quirk;
+ u8 bypass_cbndx;
+ u32 stall_enabled;
+};
+
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu);
+#else
+static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
+static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ARM_SMMU_QCOM_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 2ed3594f384e..dfa82df00342 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1432,27 +1432,19 @@ out_free:
static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_cfg *cfg;
- struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
-
- cfg = dev_iommu_priv_get(dev);
- smmu = cfg->smmu;
-
- ret = arm_smmu_rpm_get(smmu);
+ ret = arm_smmu_rpm_get(cfg->smmu);
if (ret < 0)
return;
arm_smmu_master_free_smes(cfg, fwspec);
- arm_smmu_rpm_put(smmu);
+ arm_smmu_rpm_put(cfg->smmu);
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
- iommu_fwspec_free(dev);
}
static void arm_smmu_probe_finalize(struct device *dev)
@@ -1592,7 +1584,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
@@ -2071,10 +2062,57 @@ err_reset_platform_ops: __maybe_unused;
return err;
}
+static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+ int idx, cnt = 0;
+ u32 reg;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ /*
+ * Rather than trying to look at existing mappings that
+	 * are set up by the firmware and then invalidate the ones
+	 * that do not have matching RMR entries, just disable the
+ * SMMU until it gets enabled again in the reset routine.
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
+ reg |= ARM_SMMU_sCR0_CLIENTPD;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ struct iommu_iort_rmr_data *rmr;
+ int i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
+ if (idx < 0)
+ continue;
+
+ if (smmu->s2crs[idx].count == 0) {
+ smmu->smrs[idx].id = rmr->sids[i];
+ smmu->smrs[idx].mask = 0;
+ smmu->smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+
+ cnt++;
+ }
+ }
+
+ dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
+ cnt == 1 ? "" : "s");
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
- resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
@@ -2098,7 +2136,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- ioaddr = res->start;
+ smmu->ioaddr = res->start;
+
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
@@ -2178,7 +2217,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
- "smmu.%pa", &ioaddr);
+ "smmu.%pa", &smmu->ioaddr);
if (err) {
dev_err(dev, "Failed to register iommu in sysfs\n");
return err;
@@ -2191,6 +2230,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 2b9b42fb6f30..703fd5817ec1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -278,6 +278,7 @@ struct arm_smmu_device {
struct device *dev;
void __iomem *base;
+ phys_addr_t ioaddr;
unsigned int numpage;
unsigned int pgshift;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 4c077c38fbd6..17235116d3bb 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -532,16 +532,6 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static void qcom_iommu_release_device(struct device *dev)
-{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
-
- if (!qcom_iommu)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
@@ -591,7 +581,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
.probe_device = qcom_iommu_probe_device,
- .release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
@@ -750,9 +739,12 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
struct device_node *child;
- for_each_child_of_node(qcom_iommu->dev->of_node, child)
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
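For context, the added of_node_put() addresses the standard for_each_child_of_node() pitfall: the iterator holds a reference on the current child and releases it only when advancing, so any early exit must release it by hand. The shape, as a kernel-style sketch (match() is a hypothetical predicate):

for_each_child_of_node(parent, child) {
	if (match(child)) {
		of_node_put(child);	/* balance the iterator's reference */
		return true;
	}
}
return false;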
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f90251572a5d..17dd683b2fce 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -64,6 +65,7 @@ struct iommu_dma_cookie {
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
+ struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -310,6 +312,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (!domain->iova_cookie)
return -ENOMEM;
+ mutex_init(&domain->iova_cookie->mutex);
return 0;
}
@@ -385,7 +388,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
- iort_iommu_msi_get_resv_regions(dev, list);
+ iort_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -560,26 +563,33 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
/* start_pfn is always nonzero for an already-initialised domain */
+ mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
- return -EFAULT;
+ ret = -EFAULT;
+ goto done_unlock;
}
- return 0;
+ ret = 0;
+ goto done_unlock;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
- return ret;
+ goto done_unlock;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
domain->type = IOMMU_DOMAIN_DMA;
- return iova_reserve_iommu_regions(dev, domain);
+ ret = iova_reserve_iommu_regions(dev, domain);
+
+done_unlock:
+ mutex_unlock(&cookie->mutex);
+ return ret;
}
/**
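The new cookie mutex turns domain initialisation into a classic serialised init-once: take the lock, re-check whether another caller got there first, and either validate compatibility or initialise. A minimal pthread sketch of the same shape (do_init is a hypothetical one-shot setup step):

#include <pthread.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialised;

static int init_once(int (*do_init)(void))
{
	int ret = 0;

	pthread_mutex_lock(&init_lock);
	if (!initialised) {
		ret = do_init();
		if (!ret)
			initialised = 1;
	}
	pthread_mutex_unlock(&init_lock);
	return ret;
}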
@@ -1053,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
+ dma_addr_t s_dma_addr = sg_dma_address(s);
unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
- s->offset += s_iova_off;
- s->length = s_length;
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
+ if (sg_is_dma_bus_address(s)) {
+ if (i > 0)
+ cur = sg_next(cur);
+
+ sg_dma_unmark_bus_address(s);
+ sg_dma_address(cur) = s_dma_addr;
+ sg_dma_len(cur) = s_length;
+ sg_dma_mark_bus_address(cur);
+ count++;
+ cur_len = 0;
+ continue;
+ }
+
+ s->offset += s_iova_off;
+ s->length = s_length;
+
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
@@ -1102,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_MAPPING_ERROR)
- s->offset += sg_dma_address(s);
- if (sg_dma_len(s))
- s->length = sg_dma_len(s);
+ if (sg_is_dma_bus_address(s)) {
+ sg_dma_unmark_bus_address(s);
+ } else {
+ if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+ s->offset += sg_dma_address(s);
+ if (sg_dma_len(s))
+ s->length = sg_dma_len(s);
+ }
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
@@ -1158,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1187,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
+ if (is_pci_p2pdma_page(sg_page(s))) {
+ map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+ switch (map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as
+ * it is marked as a bus address,
+ * __finalise_sg() will copy the dma address
+ * into the output segment.
+ */
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mapping through host bridge should be
+			 * Mappings through the host bridge are
+			 * done with regular IOVAs, so we
+ */
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
+ }
+ }
+
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
s->offset -= s_iova_off;
@@ -1215,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
+ if (!iova_len)
+ return __finalise_sg(dev, sg, nents, 0);
+
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
@@ -1236,7 +1294,7 @@ out_free_iova:
out_restore_sg:
__invalidate_sg(sg, nents);
out:
- if (ret != -ENOMEM)
+ if (ret != -ENOMEM && ret != -EREMOTEIO)
return -EINVAL;
return ret;
}
@@ -1244,7 +1302,7 @@ out:
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start, end;
+ dma_addr_t end = 0, start;
struct scatterlist *tmp;
int i;
@@ -1258,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
/*
* The scatterlist segments are mapped into a single
- * contiguous IOVA allocation, so this is incredibly easy.
+ * contiguous IOVA allocation, the start and end points
+ * just have to be determined.
*/
- start = sg_dma_address(sg);
- for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ for_each_sg(sg, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
+ if (sg_dma_len(tmp) == 0)
+ break;
+
+ start = sg_dma_address(tmp);
+ break;
+ }
+
+ nents -= i;
+ for_each_sg(tmp, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
if (sg_dma_len(tmp) == 0)
break;
- sg = tmp;
+
+ end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
- end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(dev, start, end - start);
+
+ if (end)
+ __iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1459,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
+static size_t iommu_dma_opt_mapping_size(void)
+{
+ return iova_rcache_range();
+}
+
static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
@@ -1479,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
};
/*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 71f2018e23fe..8e18984a0c4f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -135,6 +135,11 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+#define CTRL_VM_ENABLE BIT(0)
+#define CTRL_VM_FAULT_MODE_STALL BIT(3)
+#define CAPA0_CAPA1_EXIST BIT(11)
+#define CAPA1_VCR_ENABLED BIT(14)
+
/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
@@ -148,29 +153,20 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
/* v1.x - v3.x registers */
-#define REG_MMU_FLUSH 0x00C
-#define REG_MMU_FLUSH_ENTRY 0x010
-#define REG_PT_BASE_ADDR 0x014
-#define REG_INT_STATUS 0x018
-#define REG_INT_CLEAR 0x01C
-
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
/* v5.x registers */
-#define REG_V5_PT_BASE_PFN 0x00C
-#define REG_V5_MMU_FLUSH_ALL 0x010
-#define REG_V5_MMU_FLUSH_ENTRY 0x014
-#define REG_V5_MMU_FLUSH_RANGE 0x018
-#define REG_V5_MMU_FLUSH_START 0x020
-#define REG_V5_MMU_FLUSH_END 0x024
-#define REG_V5_INT_STATUS 0x060
-#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
+/* v7.x registers */
+#define REG_V7_CAPA0 0x870
+#define REG_V7_CAPA1 0x874
+#define REG_V7_CTRL_VM 0x8000
+
#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
@@ -251,6 +247,21 @@ struct exynos_iommu_domain {
};
/*
+ * SysMMU version specific data. Contains offsets for registers that are
+ * present across SysMMU variants but live at different locations.
+ */
+struct sysmmu_variant {
+ u32 pt_base; /* page table base address (physical) */
+ u32 flush_all; /* invalidate all TLB entries */
+ u32 flush_entry; /* invalidate specific TLB entry */
+ u32 flush_range; /* invalidate TLB entries in specified range */
+ u32 flush_start; /* start address of range invalidation */
+ u32 flush_end; /* end address of range invalidation */
+ u32 int_status; /* interrupt status information */
+ u32 int_clear; /* clear the interrupt */
+};
+
+/*
* This structure holds all data of a single SYSMMU controller; this includes
* hw resources like registers and clocks, pointers and list nodes to connect
* it to all other structures, internal state and parameters read from device
@@ -274,6 +285,45 @@ struct sysmmu_drvdata {
unsigned int version; /* our version */
struct iommu_device iommu; /* IOMMU core handle */
+ const struct sysmmu_variant *variant; /* version specific data */
+
+ /* v7 fields */
+ bool has_vcr; /* virtual machine control register */
+};
+
+#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
+
+/* SysMMU v1..v3 */
+static const struct sysmmu_variant sysmmu_v1_variant = {
+ .flush_all = 0x0c,
+ .flush_entry = 0x10,
+ .pt_base = 0x14,
+ .int_status = 0x18,
+ .int_clear = 0x1c,
+};
+
+/* SysMMU v5 and v7 (non-VM capable) */
+static const struct sysmmu_variant sysmmu_v5_variant = {
+ .pt_base = 0x0c,
+ .flush_all = 0x10,
+ .flush_entry = 0x14,
+ .flush_range = 0x18,
+ .flush_start = 0x20,
+ .flush_end = 0x24,
+ .int_status = 0x60,
+ .int_clear = 0x64,
+};
+
+/* SysMMU v7: VM capable register set */
+static const struct sysmmu_variant sysmmu_v7_vm_variant = {
+ .pt_base = 0x800c,
+ .flush_all = 0x8010,
+ .flush_entry = 0x8014,
+ .flush_range = 0x8018,
+ .flush_start = 0x8020,
+ .flush_end = 0x8024,
+ .int_status = 0x60,
+ .int_clear = 0x64,
};
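The SYSMMU_REG() macro above folds the per-generation offset tables into a single accessor. A cut-down sketch of the dispatch, with offsets copied from the v1/v5 tables; the base-plus-offset arithmetic is the point:

#include <stdint.h>

struct variant { uint32_t flush_all, pt_base; };

static const struct variant v1 = { .flush_all = 0x0c, .pt_base = 0x14 };
static const struct variant v5 = { .flush_all = 0x10, .pt_base = 0x0c };

/* Resolve a register to base + the selected variant's offset. */
#define SYSMMU_REG(base, var, reg) ((base) + (var)->reg)

static uintptr_t flush_all_addr(uintptr_t sfrbase, const struct variant *var)
{
	return SYSMMU_REG(sfrbase, var, flush_all);
}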
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -304,10 +354,7 @@ static bool sysmmu_block(struct sysmmu_drvdata *data)
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
- if (MMU_MAJ_VER(data->version) < 5)
- writel(0x1, data->sfrbase + REG_MMU_FLUSH);
- else
- writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
+ writel(0x1, SYSMMU_REG(data, flush_all));
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -315,34 +362,30 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
unsigned int i;
- if (MMU_MAJ_VER(data->version) < 5) {
+ if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
for (i = 0; i < num_inv; i++) {
writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_MMU_FLUSH_ENTRY);
+ SYSMMU_REG(data, flush_entry));
iova += SPAGE_SIZE;
}
} else {
- if (num_inv == 1) {
- writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
- } else {
- writel((iova & SPAGE_MASK),
- data->sfrbase + REG_V5_MMU_FLUSH_START);
- writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
- data->sfrbase + REG_V5_MMU_FLUSH_END);
- writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
- }
+ writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
+ writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
+ SYSMMU_REG(data, flush_end));
+ writel(0x1, SYSMMU_REG(data, flush_range));
}
}
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
+ u32 pt_base;
+
if (MMU_MAJ_VER(data->version) < 5)
- writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
+ pt_base = pgd;
else
- writel(pgd >> PAGE_SHIFT,
- data->sfrbase + REG_V5_PT_BASE_PFN);
+ pt_base = pgd >> SPAGE_ORDER;
+ writel(pt_base, SYSMMU_REG(data, pt_base));
__sysmmu_tlb_invalidate(data);
}
@@ -362,6 +405,20 @@ static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
clk_disable_unprepare(data->clk_master);
}
+static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
+{
+ u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);
+
+ return capa0 & CAPA0_CAPA1_EXIST;
+}
+
+static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
+{
+ u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);
+
+ data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
+}
+
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
u32 ver;
@@ -379,6 +436,19 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
+ if (MMU_MAJ_VER(data->version) < 5) {
+ data->variant = &sysmmu_v1_variant;
+ } else if (MMU_MAJ_VER(data->version) < 7) {
+ data->variant = &sysmmu_v5_variant;
+ } else {
+ if (__sysmmu_has_capa1(data))
+ __sysmmu_get_vcr(data);
+ if (data->has_vcr)
+ data->variant = &sysmmu_v7_vm_variant;
+ else
+ data->variant = &sysmmu_v5_variant;
+ }
+
__sysmmu_disable_clocks(data);
}
@@ -406,19 +476,14 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
const struct sysmmu_fault_info *finfo;
unsigned int i, n, itype;
sysmmu_iova_t fault_addr;
- unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
- reg_status = REG_INT_STATUS;
- reg_clear = REG_INT_CLEAR;
finfo = sysmmu_faults;
n = ARRAY_SIZE(sysmmu_faults);
} else {
- reg_status = REG_V5_INT_STATUS;
- reg_clear = REG_V5_INT_CLEAR;
finfo = sysmmu_v5_faults;
n = ARRAY_SIZE(sysmmu_v5_faults);
}
@@ -427,7 +492,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
clk_enable(data->clk_master);
- itype = __ffs(readl(data->sfrbase + reg_status));
+ itype = __ffs(readl(SYSMMU_REG(data, int_status)));
for (i = 0; i < n; i++, finfo++)
if (finfo->bit == itype)
break;
@@ -444,7 +509,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
/* fault is not recovered by fault handler */
BUG_ON(ret != 0);
- writel(1 << itype, data->sfrbase + reg_clear);
+ writel(1 << itype, SYSMMU_REG(data, int_clear));
sysmmu_unblock(data);
@@ -486,6 +551,18 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
+static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
+{
+ u32 ctrl;
+
+ if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
+ return;
+
+ ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
+ ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
+ writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
+}
+
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
unsigned long flags;
@@ -496,6 +573,7 @@ static void __sysmmu_enable(struct sysmmu_drvdata *data)
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
__sysmmu_set_ptbase(data, data->pgtable);
+ __sysmmu_enable_vid(data);
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
data->active = true;
spin_unlock_irqrestore(&data->lock, flags);
@@ -551,7 +629,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
* 64KB page can be one of 16 consecutive sets.
*/
if (MMU_MAJ_VER(data->version) == 2)
- num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+ num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);
if (sysmmu_block(data)) {
__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
@@ -623,6 +701,8 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev;
spin_lock_init(&data->lock);
+ __sysmmu_get_version(data);
+
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(data->sysmmu));
if (ret)
@@ -630,11 +710,10 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
if (ret)
- return ret;
+ goto err_iommu_register;
platform_set_drvdata(pdev, data);
- __sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
@@ -647,6 +726,14 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
}
}
+ if (MMU_MAJ_VER(data->version) >= 5) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+ if (ret) {
+ dev_err(dev, "Unable to set DMA mask: %d\n", ret);
+ goto err_dma_set_mask;
+ }
+ }
+
/*
* use the first registered sysmmu device for performing
* dma mapping operations on iommu page tables (cpu cache flush)
@@ -657,6 +744,12 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
+
+err_dma_set_mask:
+ iommu_device_unregister(&data->iommu);
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
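The err_dma_set_mask/err_iommu_register labels added above follow the kernel's standard goto-unwind shape: each failure jumps to a label that undoes only what already succeeded, in reverse order. A skeletal sketch with hypothetical steps:

static int step_a(void) { return 0; }   /* placeholder setup steps  */
static int step_b(void) { return 0; }
static void undo_a(void) { }            /* reverses step_a          */

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;     /* nothing to unwind yet */

	ret = step_b();
	if (ret)
		goto err_undo_a;

	return 0;

err_undo_a:
	undo_a();
	return ret;
}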
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
@@ -1251,9 +1344,6 @@ static void exynos_iommu_release_device(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
- if (!has_sysmmu(dev))
- return;
-
if (owner->domain) {
struct iommu_group *group = iommu_group_get(dev);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 94b4589dc67c..011f9ab7f743 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -447,15 +447,10 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
return &pamu_iommu;
}
-static void fsl_pamu_release_device(struct device *dev)
-{
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
- .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 51bd66a45a11..e190bb8c225c 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct irq_desc *desc;
int ret = 0;
if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* Hyper-V IO APIC irq affinity should be in the scope of
* ioapic_max_cpumask because there is no irq remapping support.
*/
- desc = irq_data_to_desc(irq_data);
- cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+ irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
return 0;
}
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 71596fc62822..3ee68393122f 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "DMAR: " fmt
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "cap_audit.h"
static u64 intel_iommu_cap_sanity;
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index ed796eea4581..1f925285104e 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -10,11 +10,11 @@
#include <linux/debugfs.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/pci.h>
#include <asm/irq_remapping.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
@@ -263,10 +263,9 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
- unsigned long flags;
u16 bus;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
@@ -278,8 +277,7 @@ static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
*/
for (bus = 0; bus < 256; bus++)
ctx_tbl_walk(m, iommu, bus);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
}
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
@@ -342,13 +340,13 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
}
}
-static int show_device_domain_translation(struct device *dev, void *data)
+static int __show_device_domain_translation(struct device *dev, void *data)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
+ struct dmar_domain *domain;
struct seq_file *m = data;
u64 path[6] = { 0 };
+ domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
if (!domain)
return 0;
@@ -359,20 +357,39 @@ static int show_device_domain_translation(struct device *dev, void *data)
pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
seq_putc(m, '\n');
- return 0;
+ /* Don't iterate */
+ return 1;
}
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int show_device_domain_translation(struct device *dev, void *data)
{
- unsigned long flags;
- int ret;
+ struct iommu_group *group;
- spin_lock_irqsave(&device_domain_lock, flags);
- ret = bus_for_each_dev(&pci_bus_type, NULL, m,
- show_device_domain_translation);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ group = iommu_group_get(dev);
+ if (group) {
+ /*
+ * The group->mutex is held across the callback, which will
+ * block calls to iommu_attach/detach_group/device. Hence,
+ * the domain of the device will not change during traversal.
+ *
+ * All devices in an iommu group share a single domain, hence
+	 * we only dump the domain of the first device. Even so,
+	 * this code may still race with the iommu_unmap()
+ * interface. This could be solved by RCU-freeing the page
+ * table pages in the iommu_unmap() path.
+ */
+ iommu_group_for_each_dev(group, data,
+ __show_device_domain_translation);
+ iommu_group_put(group);
+ }
- return ret;
+ return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+ return bus_for_each_dev(&pci_bus_type, NULL, m,
+ show_device_domain_translation);
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9699ca101c62..5a8f780e7ffd 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
-#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -30,10 +29,11 @@
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "perf.h"
+#include "trace.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
@@ -60,7 +60,7 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
-static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
+static DEFINE_IDA(dmar_seq_ids);
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
if (drhd->reg_base_addr == rhsa->base_address) {
int node = pxm_to_node(rhsa->proximity_domain);
- if (!node_online(node))
+ if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
@@ -1023,28 +1023,6 @@ out:
return err;
}
-static int dmar_alloc_seq_id(struct intel_iommu *iommu)
-{
- iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
- DMAR_UNITS_SUPPORTED);
- if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
- iommu->seq_id = -1;
- } else {
- set_bit(iommu->seq_id, dmar_seq_ids);
- sprintf(iommu->name, "dmar%d", iommu->seq_id);
- }
-
- return iommu->seq_id;
-}
-
-static void dmar_free_seq_id(struct intel_iommu *iommu)
-{
- if (iommu->seq_id >= 0) {
- clear_bit(iommu->seq_id, dmar_seq_ids);
- iommu->seq_id = -1;
- }
-}
-
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
@@ -1062,11 +1040,14 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- if (dmar_alloc_seq_id(iommu) < 0) {
+ iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
+ DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
+ if (iommu->seq_id < 0) {
pr_err("Failed to allocate seq_id\n");
- err = -ENOSPC;
+ err = iommu->seq_id;
goto error;
}
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
@@ -1150,7 +1131,7 @@ err_sysfs:
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
error:
kfree(iommu);
return err;
@@ -1183,7 +1164,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
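The DEFINE_IDA() conversion above trades the hand-rolled bitmap for the generic ID allocator: ida_alloc_range() returns the lowest free id in the requested range (or a negative errno) and ida_free() recycles it. A sketch of the pairing, with an illustrative range:

#include <linux/idr.h>

static DEFINE_IDA(example_ids);

/* Allocate the lowest free id in [0, 31], mirroring the dmar_seq_ids
 * usage above; returns a negative errno on exhaustion. */
static int example_get_id(void)
{
	return ida_alloc_range(&example_ids, 0, 31, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_free(&example_ids, id);
}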
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c0dce78586a..31bc50e538a3 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -17,7 +17,6 @@
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dmi.h>
-#include <linux/intel-iommu.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
#include <linux/pci.h>
@@ -26,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -126,13 +126,8 @@ static inline unsigned long virt_to_dma_pfn(void *p)
return page_to_dma_pfn(virt_to_page(p));
}
-/* global iommu list, set NULL for ignored DMAR units */
-static struct intel_iommu **g_iommus;
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn);
/*
* set to 1 to panic kernel if can't successfully enable VT-d
@@ -168,38 +163,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-static inline void context_clear_pasid_enable(struct context_entry *context)
-{
- context->lo &= ~(1ULL << 11);
-}
-
-static inline bool context_pasid_enabled(struct context_entry *context)
-{
- return !!(context->lo & (1ULL << 11));
-}
-
-static inline void context_set_copied(struct context_entry *context)
-{
- context->hi |= (1ull << 3);
-}
-
-static inline bool context_copied(struct context_entry *context)
-{
- return !!(context->hi & (1ULL << 3));
-}
-
-static inline bool __context_present(struct context_entry *context)
-{
- return (context->lo & 1);
-}
-
-bool context_present(struct context_entry *context)
-{
- return context_pasid_enabled(context) ?
- __context_present(context) :
- __context_present(context) && !context_copied(context);
-}
-
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -247,6 +210,26 @@ static inline void context_clear_entry(struct context_entry *context)
context->hi = 0;
}
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ if (!iommu->copied_tables)
+ return false;
+
+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
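The copied-tables bitmap is indexed by the 16-bit source id, one bit per possible device behind the IOMMU. The index computation the three helpers above share, as a sketch:

#include <stdint.h>

/* bus in bits 15:8, devfn in bits 7:0 -> 65536 possible bits. */
static inline unsigned long source_id_index(uint8_t bus, uint8_t devfn)
{
	return ((unsigned long)bus << 8) | devfn;
}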
/*
* This domain is a static identity mapping domain.
*	1. This domain creates a static 1:1 mapping to all usable memory.
@@ -256,10 +239,6 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-#define for_each_domain_iommu(idx, domain) \
- for (idx = 0; idx < g_num_of_iommus; idx++) \
- if (domain->iommu_refcnt[idx])
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -293,12 +272,7 @@ static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-/* bitmap for indexing intel_iommus */
-static int g_num_of_iommus;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
-static void __dmar_remove_one_dev_info(struct device_domain_info *info);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -314,12 +288,6 @@ static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-int intel_iommu_gfx_mapped;
-EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-
-DEFINE_SPINLOCK(device_domain_lock);
-static LIST_HEAD(device_domain_list);
-
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -422,14 +390,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+ unsigned long fl_sagaw, sl_sagaw;
+
+ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+ sl_sagaw = cap_sagaw(iommu->cap);
+
+ /* Second level only. */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return sl_sagaw;
+
+ /* First level only. */
+ if (!ecap_slts(iommu->ecap))
+ return fl_sagaw;
+
+ return fl_sagaw & sl_sagaw;
+}
+
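A worked sketch of the SAGAW narrowing above: the first-level set is always 4-level (bit 2), plus 5-level when supported (bit 3); scalable mode with both translation types keeps only the widths both levels can walk. Values below are illustrative, not from any particular unit:

#include <stdio.h>

#define BIT(n) (1UL << (n))

static unsigned long calc_sagaw(unsigned long fl, unsigned long sl,
				int have_fl, int have_sl)
{
	if (!have_fl)
		return sl;      /* second level only */
	if (!have_sl)
		return fl;      /* first level only  */
	return fl & sl;         /* intersection      */
}

int main(void)
{
	/* e.g. fl = 4- and 5-level, sl = 4-level only -> prints 0x4 */
	printf("%#lx\n", calc_sagaw(BIT(2) | BIT(3), BIT(2), 1, 1));
	return 0;
}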
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
+ sagaw = __iommu_calculate_sagaw(iommu);
+ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
@@ -455,24 +445,6 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-/* This functionin only returns single iommu in a domain */
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
-{
- int iommu_id;
-
- /* si_domain and vm domain should not get here. */
- if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
- return NULL;
-
- for_each_domain_iommu(iommu_id, domain)
- break;
-
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
-
- return g_iommus[iommu_id];
-}
-
static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
@@ -481,16 +453,16 @@ static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
+ struct iommu_domain_info *info;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool found = false;
- int i;
+ unsigned long i;
domain->iommu_coherency = true;
-
- for_each_domain_iommu(i, domain) {
+ xa_for_each(&domain->iommu_array, i, info) {
found = true;
- if (!iommu_paging_structure_coherency(g_iommus[i])) {
+ if (!iommu_paging_structure_coherency(info->iommu)) {
domain->iommu_coherency = false;
break;
}
@@ -543,16 +515,10 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
+ unsigned long flags;
- assert_spin_locked(&device_domain_lock);
-
- if (list_empty(&domain->devices))
- return NUMA_NO_NODE;
-
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
- if (!info->dev)
- continue;
-
/*
* There could possibly be multiple device numa nodes as devices
* within the same domain may sit behind different IOMMUs. There
@@ -563,6 +529,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
+ spin_unlock_irqrestore(&domain->lock, flags);
return nid;
}
@@ -622,6 +589,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
struct context_entry *context;
u64 *entry;
+ /*
+	 * Unless the caller requested to allocate a new entry,
+ * returning a copied context entry makes no sense.
+ */
+ if (!alloc && context_copied(iommu, bus, devfn))
+ return NULL;
+
entry = &root->lo;
if (sm_supported(iommu)) {
if (devfn >= 0x80) {
@@ -804,26 +778,23 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct context_entry *context;
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (context)
ret = context_present(context);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return ret;
}
static void free_context_table(struct intel_iommu *iommu)
{
- int i;
- unsigned long flags;
struct context_entry *context;
+ int i;
+
+ if (!iommu->root_entry)
+ return;
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry) {
- goto out;
- }
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
@@ -835,36 +806,18 @@ static void free_context_table(struct intel_iommu *iommu)
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
-
}
+
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
-out:
- spin_unlock_irqrestore(&iommu->lock, flags);
}
#ifdef CONFIG_DMAR_DEBUG
-static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn)
+static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
+ u8 bus, u8 devfn, struct dma_pte *parent, int level)
{
- struct device_domain_info *info;
- struct dma_pte *parent, *pte;
- struct dmar_domain *domain;
- int offset, level;
-
- info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
- if (!info || !info->domain) {
- pr_info("device [%02x:%02x.%d] not probed\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- return;
- }
-
- domain = info->domain;
- level = agaw_to_level(domain->agaw);
- parent = domain->pgd;
- if (!parent) {
- pr_info("no page table setup\n");
- return;
- }
+ struct dma_pte *pte;
+ int offset;
while (1) {
offset = pfn_level_offset(pfn, level);
@@ -891,9 +844,10 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
struct pasid_entry *entries, *pte;
struct context_entry *ctx_entry;
struct root_entry *rt_entry;
+ int i, dir_index, index, level;
u8 devfn = source_id & 0xff;
u8 bus = source_id >> 8;
- int i, dir_index, index;
+ struct dma_pte *pgtable;
pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
@@ -921,8 +875,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
ctx_entry->hi, ctx_entry->lo);
/* legacy mode does not require PASID entries */
- if (!sm_supported(iommu))
+ if (!sm_supported(iommu)) {
+ level = agaw_to_level(ctx_entry->hi & 7);
+ pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
goto pgtable_walk;
+ }
/* get the pointer to pasid directory entry */
dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
@@ -949,8 +906,16 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
for (i = 0; i < ARRAY_SIZE(pte->val); i++)
pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
+ if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
+ level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
+ pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
+ } else {
+ level = agaw_to_level((pte->val[0] >> 2) & 0x7);
+ pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
+ }
+
pgtable_walk:
- pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn);
+ pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
}
#endif
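After this change pgtable_walk() no longer looks up the domain itself; the caller hands it the page-table root and starting level. A reconstruction of the elided descend loop, assuming the VT-d helpers named elsewhere in this file (pfn_level_offset, dma_pte_addr, dma_pte_present, dma_pte_superpage); shown as a sketch of the walk, not copied code:

    struct dma_pte *pte;
    int offset;

    while (1) {
            offset = pfn_level_offset(pfn, level);
            pte = &parent[offset];
            if (dma_pte_superpage(pte) || !dma_pte_present(pte)) {
                    pr_info("page table not present at level %d\n", level);
                    break;
            }
            pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val);
            if (level == 1)
                    break;
            /* descend one level into the next page-table page */
            parent = phys_to_virt(dma_pte_addr(pte));
            level--;
    }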
@@ -1234,7 +1199,6 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) {
@@ -1244,10 +1208,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
-
- spin_lock_irqsave(&iommu->lock, flags);
iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
@@ -1389,23 +1350,24 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+ u8 bus, u8 devfn)
{
struct device_domain_info *info;
-
- assert_spin_locked(&device_domain_lock);
+ unsigned long flags;
if (!iommu->qi)
return NULL;
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- if (info->ats_supported && info->dev)
- return info;
- break;
+ spin_unlock_irqrestore(&domain->lock, flags);
+ return info->ats_supported ? info : NULL;
}
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
@@ -1414,24 +1376,23 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
+ unsigned long flags;
- assert_spin_locked(&device_domain_lock);
-
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
-
+ }
domain->has_iotlb_device = has_iotlb_device;
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!info || !dev_is_pci(info->dev))
return;
@@ -1477,8 +1438,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!dev_is_pci(info->dev))
return;
@@ -1518,17 +1477,16 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- unsigned long flags;
struct device_domain_info *info;
+ unsigned long flags;
if (!domain->has_iotlb_device)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1539,7 +1497,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
unsigned int aligned_pages = __roundup_pow_of_two(pages);
unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain->iommu_did[iommu->seq_id];
+ u16 did = domain_id_iommu(domain, iommu);
BUG_ON(pages == 0);
@@ -1609,11 +1567,12 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- int idx;
+ struct iommu_domain_info *info;
+ unsigned long idx;
- for_each_domain_iommu(idx, dmar_domain) {
- struct intel_iommu *iommu = g_iommus[idx];
- u16 did = dmar_domain->iommu_did[iommu->seq_id];
+ xa_for_each(&dmar_domain->iommu_array, idx, info) {
+ struct intel_iommu *iommu = info->iommu;
+ u16 did = domain_id_iommu(dmar_domain, iommu);
if (domain_use_first_level(dmar_domain))
qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
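The domain now tracks its attached IOMMUs in an xarray indexed by seq_id, replacing the global g_iommus[] walk. A distilled sketch of the iteration pattern, with a hypothetical per-IOMMU callback standing in for the flush body:

    #include <linux/xarray.h>

    /* Illustrative walk of the per-domain IOMMU array; mirrors the
     * xa_for_each() pattern above. for_each_attached_iommu() and the
     * fn callback are assumptions for illustration. */
    static void for_each_attached_iommu(struct dmar_domain *domain,
                                        void (*fn)(struct intel_iommu *, u16))
    {
            struct iommu_domain_info *info;
            unsigned long idx;

            xa_for_each(&domain->iommu_array, idx, info)
                    fn(info->iommu, info->did);
    }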
@@ -1719,23 +1678,16 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
if (!iommu->domain_ids)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- if (info->iommu != iommu)
- continue;
-
- if (!info->dev || !info->domain)
- continue;
-
- __dmar_remove_one_dev_info(info);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ /*
+ * All iommu domains must have been detached from the devices,
+ * hence there should be no domain IDs in use.
+ */
+ if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
+ > NUM_RESERVED_DID))
+ return;
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
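The WARN above tolerates up to NUM_RESERVED_DID set bits because domain ID 0 is reserved (for caching-mode hardware) and FLPT_DEFAULT_DID (1) is reserved for pass-through, per pasid.h later in this patch. A hypothetical helper restating the check, offered as a sketch:

    static bool iommu_has_live_domains(struct intel_iommu *iommu)
    {
            /* Bits for the reserved DIDs may legitimately remain set. */
            return bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
                    > NUM_RESERVED_DID;
    }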
@@ -1748,7 +1700,10 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
- g_iommus[iommu->seq_id] = NULL;
+ if (iommu->copied_tables) {
+ bitmap_free(iommu->copied_tables);
+ iommu->copied_tables = NULL;
+ }
/* free context mapping */
free_context_table(iommu);
@@ -1795,55 +1750,77 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ spin_lock_init(&domain->lock);
+ xa_init(&domain->iommu_array);
return domain;
}
-/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
+ struct iommu_domain_info *info, *curr;
unsigned long ndomains;
- int num;
+ int num, ret = -ENOSPC;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- domain->iommu_refcnt[iommu->seq_id] += 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 1) {
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ spin_lock(&iommu->lock);
+ curr = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (curr) {
+ curr->refcnt++;
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return 0;
+ }
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- return -ENOSPC;
- }
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ goto err_unlock;
+ }
- set_bit(num, iommu->domain_ids);
- domain->iommu_did[iommu->seq_id] = num;
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
+ set_bit(num, iommu->domain_ids);
+ info->refcnt = 1;
+ info->did = num;
+ info->iommu = iommu;
+ curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
+ NULL, info, GFP_ATOMIC);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto err_clear;
}
+ domain_update_iommu_cap(domain);
+ spin_unlock(&iommu->lock);
return 0;
+
+err_clear:
+ clear_bit(info->did, iommu->domain_ids);
+err_unlock:
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return ret;
}
static void domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num;
+ struct iommu_domain_info *info;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
-
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 0) {
- num = domain->iommu_did[iommu->seq_id];
- clear_bit(num, iommu->domain_ids);
+ spin_lock(&iommu->lock);
+ info = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (--info->refcnt == 0) {
+ clear_bit(info->did, iommu->domain_ids);
+ xa_erase(&domain->iommu_array, iommu->seq_id);
+ domain->nid = NUMA_NO_NODE;
domain_update_iommu_cap(domain);
- domain->iommu_did[iommu->seq_id] = 0;
+ kfree(info);
}
+ spin_unlock(&iommu->lock);
}
static inline int guestwidth_to_adjustwidth(int gaw)
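domain_attach_iommu() above implements a get-or-insert idiom: allocate outside the lock, take a reference if an entry already exists, otherwise publish the new entry with xa_cmpxchg() and roll back on failure. A distilled, self-contained sketch of that idiom with hypothetical names (ref_entry, ref_get_or_insert):

    #include <linux/xarray.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct ref_entry {
            unsigned int refcnt;
    };

    /* Returns 0 on success; a matching put erases the entry when
     * refcnt drops to zero, as domain_detach_iommu() does above. */
    static int ref_get_or_insert(struct xarray *xa, unsigned long index,
                                 spinlock_t *lock)
    {
            struct ref_entry *entry, *curr;

            entry = kzalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                    return -ENOMEM;

            spin_lock(lock);
            curr = xa_load(xa, index);
            if (curr) {
                    curr->refcnt++;         /* fast path: already present */
                    spin_unlock(lock);
                    kfree(entry);
                    return 0;
            }

            entry->refcnt = 1;
            curr = xa_cmpxchg(xa, index, NULL, entry, GFP_ATOMIC);
            if (curr) {                     /* xarray error (e.g. -ENOMEM) */
                    spin_unlock(lock);
                    kfree(entry);
                    return xa_err(curr) ? : -EBUSY;
            }
            spin_unlock(lock);
            return 0;
    }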
@@ -1862,10 +1839,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
static void domain_exit(struct dmar_domain *domain)
{
-
- /* Remove associated devices and clear attached or cached domains */
- domain_remove_dev_info(domain);
-
if (domain->pgd) {
LIST_HEAD(freelist);
@@ -1873,6 +1846,9 @@ static void domain_exit(struct dmar_domain *domain)
put_pages_list(&freelist);
}
+ if (WARN_ON(!list_empty(&domain->devices)))
+ return;
+
kfree(domain);
}
@@ -1930,11 +1906,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct pasid_table *table,
u8 bus, u8 devfn)
{
- u16 did = domain->iommu_did[iommu->seq_id];
+ struct device_domain_info *info =
+ iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
- struct device_domain_info *info = NULL;
struct context_entry *context;
- unsigned long flags;
int ret;
WARN_ON(did == 0);
@@ -1947,16 +1923,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
BUG_ON(!domain->pgd);
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context)
goto out_unlock;
ret = 0;
- if (context_present(context))
+ if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
/*
@@ -1968,7 +1942,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* in-flight DMA will exist, and we don't need to worry anymore
* hereafter.
*/
- if (context_copied(context)) {
+ if (context_copied(iommu, bus, devfn)) {
u16 did_old = context_domain_id(context);
if (did_old < cap_ndoms(iommu->cap)) {
@@ -1979,6 +1953,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
+
+ clear_context_copied(iommu, bus, devfn);
}
context_clear_entry(context);
@@ -2000,7 +1976,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* Setup the Device-TLB enable bit and Page request
* Enable bit:
*/
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
context_set_sm_dte(context);
if (info && info->pri_supported)
@@ -2023,7 +1998,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
}
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
translation = CONTEXT_TT_DEV_IOTLB;
else
@@ -2069,7 +2043,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
out_unlock:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -2186,8 +2159,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long end_pfn, int level)
{
unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct iommu_domain_info *info;
struct dma_pte *pte = NULL;
- int i;
+ unsigned long i;
while (start_pfn <= end_pfn) {
if (!pte)
@@ -2198,8 +2172,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
start_pfn + lvl_pages - 1,
level + 1);
- for_each_domain_iommu(i, domain)
- iommu_flush_iotlb_psi(g_iommus[i], domain,
+ xa_for_each(&domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
}
@@ -2313,16 +2287,15 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
- unsigned long flags;
u16 did_old;
if (!iommu)
return;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return;
}
@@ -2330,14 +2303,14 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
if (hw_pass_through && domain_type_is_si(info->domain))
did_old = FLPT_DEFAULT_DID;
else
- did_old = info->domain->iommu_did[iommu->seq_id];
+ did_old = domain_id_iommu(info->domain, iommu);
} else {
did_old = context_domain_id(context);
}
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
iommu->flush.flush_context(iommu,
did_old,
(((u16)bus) << 8) | devfn,
@@ -2356,30 +2329,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
-static void domain_remove_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
-{
- struct device_domain_info *info;
-
- list_for_each_entry(info, &device_domain_list, global)
- if (info->segment == segment && info->bus == bus &&
- info->devfn == devfn)
- return info;
-
- return NULL;
-}
-
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
@@ -2412,7 +2361,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags |= PASID_FLAG_PAGE_SNOOP;
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
flags);
}
@@ -2507,17 +2456,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (!iommu)
return -ENODEV;
- spin_lock_irqsave(&device_domain_lock, flags);
- info->domain = domain;
- spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (ret)
return ret;
- }
+ info->domain = domain;
+ spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock_irqrestore(&domain->lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2529,7 +2474,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
}
/* Setup the PASID entry for requests without PASID: */
- spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
@@ -2539,7 +2483,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
@@ -2761,32 +2704,14 @@ static int copy_context_table(struct intel_iommu *iommu,
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
- if (!__context_present(&ce))
+ if (!context_present(&ce))
continue;
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
set_bit(did, iommu->domain_ids);
- /*
- * We need a marker for copied context entries. This
- * marker needs to work for the old format as well as
- * for extended context entries.
- *
- * Bit 67 of the context entry is used. In the old
- * format this bit is available to software, in the
- * extended format it is the PGE bit, but PGE is ignored
- * by HW if PASIDs are disabled (and thus still
- * available).
- *
- * So disable PASIDs first and then mark the entry
- * copied. This means that we don't copy PASID
- * translations from the old kernel, but this is fine as
- * faults there are not fatal.
- */
- context_clear_pasid_enable(&ce);
- context_set_copied(&ce);
-
+ set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
}
@@ -2807,14 +2732,13 @@ static int copy_translation_tables(struct intel_iommu *iommu)
struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
- unsigned long flags;
u64 rtaddr_reg;
int bus, ret;
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
- new_ext = !!ecap_ecs(iommu->ecap);
+ ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
+ new_ext = !!sm_supported(iommu);
/*
* The RTT bit can only be changed when translation is disabled,
@@ -2825,6 +2749,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
if (new_ext != ext)
return -EINVAL;
+ iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
+ if (!iommu->copied_tables)
+ return -ENOMEM;
+
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
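The bitmap allocated above holds one bit per 16-bit source-id (BIT_ULL(16) = 65536 bits, i.e. 8 KiB). Marking an entry during the copy then reduces to a single set_bit(); a sketch under the same keying assumption as the context_copied() illustration earlier:

    static inline void
    set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
    {
            set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
    }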
@@ -2850,7 +2778,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
}
}
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
/* Context tables are copied, now write them to the root_entry table */
for (bus = 0; bus < 256; bus++) {
@@ -2869,7 +2797,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
iommu->root_entry[bus].hi = val;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
kfree(ctxt_tbls);
@@ -2968,36 +2896,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- /*
- * lock not needed as this is only incremented in the single
- * threaded kernel __init code path all other access are read
- * only
- */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
- }
-
- /* Preallocate enough resources for IOMMU hot-addition */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
- g_num_of_iommus = DMAR_UNITS_SUPPORTED;
-
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- ret = -ENOMEM;
- goto error;
- }
-
ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
if (ret)
goto free_iommu;
@@ -3020,8 +2918,6 @@ static int __init init_dmars(void)
intel_pasid_max_id);
}
- g_iommus[iommu->seq_id] = iommu;
-
intel_iommu_init_qi(iommu);
ret = iommu_init_domains(iommu);
@@ -3147,9 +3043,6 @@ free_iommu:
free_dmar_iommu(iommu);
}
- kfree(g_iommus);
-
-error:
return ret;
}
@@ -3530,9 +3423,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
int sp, ret;
struct intel_iommu *iommu = dmaru->iommu;
- if (g_iommus[iommu->seq_id])
- return 0;
-
ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
if (ret)
goto out;
@@ -3556,7 +3446,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
if (ret == 0)
ret = iommu_alloc_root_entry(iommu);
@@ -4022,6 +3911,20 @@ static int __init probe_acpi_namespace_devices(void)
return 0;
}
+static __init int tboot_force_iommu(void)
+{
+ if (!tboot_enabled())
+ return 0;
+
+ if (no_iommu || dmar_disabled)
+ pr_warn("Forcing Intel-IOMMU to enabled\n");
+
+ dmar_disabled = 0;
+ no_iommu = 0;
+
+ return 1;
+}
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -4093,9 +3996,6 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_satc_units))
pr_info("No SATC found\n");
- if (dmar_map_gfx)
- intel_iommu_gfx_mapped = 1;
-
init_no_remapping_devices();
ret = init_dmars();
@@ -4181,21 +4081,14 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void __dmar_remove_one_dev_info(struct device_domain_info *info)
+static void dmar_remove_one_dev_info(struct device *dev)
{
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *domain = info->domain;
+ struct intel_iommu *iommu = info->iommu;
unsigned long flags;
- assert_spin_locked(&device_domain_lock);
-
- if (WARN_ON(!info))
- return;
-
- iommu = info->iommu;
- domain = info->domain;
-
- if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
+ if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
@@ -4205,23 +4098,12 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
intel_pasid_free_table(info->dev);
}
+ spin_lock_irqsave(&domain->lock, flags);
list_del(&info->link);
+ spin_unlock_irqrestore(&domain->lock, flags);
- spin_lock_irqsave(&iommu->lock, flags);
domain_detach_iommu(domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dev_iommu_priv_get(dev);
- if (info)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ info->domain = NULL;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
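The reworked teardown takes each lock only around the state it protects: domain->lock for the device list, and iommu->lock (inside domain_detach_iommu()) for the domain-ID bitmap. A skeletal restatement of the ordering, with the PASID and context steps elided and a hypothetical function name:

    static void remove_dev_teardown_order(struct device_domain_info *info)
    {
            unsigned long flags;

            /* 1. tear down RID2PASID entry, clear the context mapping,
             *    and free the PASID table (elided) */

            /* 2. unlink from the domain's device list */
            spin_lock_irqsave(&info->domain->lock, flags);
            list_del(&info->link);
            spin_unlock_irqrestore(&info->domain->lock, flags);

            /* 3. drop the per-IOMMU domain reference (takes iommu->lock) */
            domain_detach_iommu(info->domain, info->iommu);
            info->domain = NULL;
    }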
@@ -4466,15 +4348,16 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long iova_pfn = IOVA_PFN(gather->start);
size_t size = gather->end - gather->start;
+ struct iommu_domain_info *info;
unsigned long start_pfn;
unsigned long nrpages;
- int iommu_id;
+ unsigned long i;
nrpages = aligned_nrpages(gather->start, size);
start_pfn = mm_to_dma_pfn(iova_pfn);
- for_each_domain_iommu(iommu_id, dmar_domain)
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, dmar_domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
@@ -4503,7 +4386,7 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
struct device_domain_info *info;
bool support = true;
- assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (!ecap_sc_support(info->iommu->ecap)) {
support = false;
@@ -4518,8 +4401,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
+ assert_spin_locked(&domain->lock);
/*
* Second level page table supports per-PTE snoop control. The
* iommu_map() interface will handle this by setting SNP bit.
@@ -4542,15 +4424,15 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
if (dmar_domain->force_snooping)
return true;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return true;
}
@@ -4572,7 +4454,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
struct device_domain_info *info;
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4615,10 +4496,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
}
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->global, &device_domain_list);
dev_iommu_priv_set(dev, info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return &iommu->iommu;
}
@@ -4626,15 +4504,9 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long flags;
dmar_remove_one_dev_info(dev);
-
- spin_lock_irqsave(&device_domain_lock, flags);
dev_iommu_priv_set(dev, NULL);
- list_del(&info->global);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
kfree(info);
set_dma_ops(dev, NULL);
}
@@ -4707,7 +4579,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct context_entry *context;
struct dmar_domain *domain;
- unsigned long flags;
u64 ctx_lo;
int ret;
@@ -4715,9 +4586,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
if (!domain)
return -EINVAL;
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -EINVAL;
if (!info->pasid_supported)
goto out;
@@ -4733,7 +4602,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
context[0].lo = ctx_lo;
wmb();
iommu->flush.flush_context(iommu,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
@@ -4747,7 +4616,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
out:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -4871,13 +4739,11 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long pages = aligned_nrpages(iova, size);
unsigned long pfn = iova >> VTD_PAGE_SHIFT;
- struct intel_iommu *iommu;
- int iommu_id;
+ struct iommu_domain_info *info;
+ unsigned long i;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, dmar_domain, pfn, pages);
- }
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
const struct iommu_ops intel_iommu_ops = {
@@ -4887,7 +4753,6 @@ const struct iommu_ops intel_iommu_ops = {
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.device_group = intel_iommu_device_group,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
new file mode 100644
index 000000000000..74b0e19e23ee
--- /dev/null
+++ b/drivers/iommu/intel/iommu.h
@@ -0,0 +1,842 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006-2015, Intel Corporation.
+ *
+ * Authors: Ashok Raj <ashok.raj@intel.com>
+ * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * David Woodhouse <David.Woodhouse@intel.com>
+ */
+
+#ifndef _INTEL_IOMMU_H_
+#define _INTEL_IOMMU_H_
+
+#include <linux/types.h>
+#include <linux/iova.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/mmu_notifier.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmar.h>
+#include <linux/ioasid.h>
+#include <linux/bitfield.h>
+#include <linux/xarray.h>
+
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+/*
+ * VT-d hardware uses 4KiB page size regardless of host page size.
+ */
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT (9)
+#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ BIT_ULL(0)
+#define DMA_PTE_WRITE BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
+#define DMA_PTE_SNP BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_US BIT_ULL(2)
+#define DMA_FL_PTE_ACCESS BIT_ULL(5)
+#define DMA_FL_PTE_DIRTY BIT_ULL(6)
+#define DMA_FL_PTE_XD BIT_ULL(63)
+
+#define ADDR_WIDTH_5LEVEL (57)
+#define ADDR_WIDTH_4LEVEL (48)
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+#define CONTEXT_PASIDE BIT_ULL(3)
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
+#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
+#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
+#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
+#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
+#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
+#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
+#define DMAR_PRS_REG 0xdc /* Page request status register */
+#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
+#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
+#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
+#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
+#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
+#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
+#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
+#define DMAR_MTRR_FIX16K_80000_REG 0x128
+#define DMAR_MTRR_FIX16K_A0000_REG 0x130
+#define DMAR_MTRR_FIX4K_C0000_REG 0x138
+#define DMAR_MTRR_FIX4K_C8000_REG 0x140
+#define DMAR_MTRR_FIX4K_D0000_REG 0x148
+#define DMAR_MTRR_FIX4K_D8000_REG 0x150
+#define DMAR_MTRR_FIX4K_E0000_REG 0x158
+#define DMAR_MTRR_FIX4K_E8000_REG 0x160
+#define DMAR_MTRR_FIX4K_F0000_REG 0x168
+#define DMAR_MTRR_FIX4K_F8000_REG 0x170
+#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
+#define DMAR_MTRR_PHYSMASK0_REG 0x188
+#define DMAR_MTRR_PHYSBASE1_REG 0x190
+#define DMAR_MTRR_PHYSMASK1_REG 0x198
+#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
+#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
+#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
+#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
+#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
+#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
+#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
+#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
+#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
+#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
+#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
+#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
+#define DMAR_MTRR_PHYSBASE8_REG 0x200
+#define DMAR_MTRR_PHYSMASK8_REG 0x208
+#define DMAR_MTRR_PHYSBASE9_REG 0x210
+#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
+#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
+#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
+
+#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
+#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
+#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg)
+
+#define OFFSET_STRIDE (9)
+
+#define dmar_readq(a) readq(a)
+#define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
+
+#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
+#define DMAR_VER_MINOR(v) ((v) & 0x0f)
+
+/*
+ * Decoding Capability Register
+ */
+#define cap_5lp_support(c) (((c) >> 60) & 1)
+#define cap_pi_support(c) (((c) >> 59) & 1)
+#define cap_fl1gp_support(c) (((c) >> 56) & 1)
+#define cap_read_drain(c) (((c) >> 55) & 1)
+#define cap_write_drain(c) (((c) >> 54) & 1)
+#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
+#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
+#define cap_pgsel_inv(c) (((c) >> 39) & 1)
+
+#define cap_super_page_val(c) (((c) >> 34) & 0xf)
+#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
+ * OFFSET_STRIDE) + 21)
+
+#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
+#define cap_max_fault_reg_offset(c) \
+ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
+
+#define cap_zlr(c) (((c) >> 22) & 1)
+#define cap_isoch(c) (((c) >> 23) & 1)
+#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
+#define cap_sagaw(c) (((c) >> 8) & 0x1f)
+#define cap_caching_mode(c) (((c) >> 7) & 1)
+#define cap_phmr(c) (((c) >> 6) & 1)
+#define cap_plmr(c) (((c) >> 5) & 1)
+#define cap_rwbf(c) (((c) >> 4) & 1)
+#define cap_afl(c) (((c) >> 3) & 1)
+#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
+/*
+ * Extended Capability Register
+ */
+
+#define ecap_rps(e) (((e) >> 49) & 0x1)
+#define ecap_smpwc(e) (((e) >> 48) & 0x1)
+#define ecap_flts(e) (((e) >> 47) & 0x1)
+#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_slads(e) (((e) >> 45) & 0x1)
+#define ecap_vcs(e) (((e) >> 44) & 0x1)
+#define ecap_smts(e) (((e) >> 43) & 0x1)
+#define ecap_dit(e) (((e) >> 41) & 0x1)
+#define ecap_pds(e) (((e) >> 42) & 0x1)
+#define ecap_pasid(e) (((e) >> 40) & 0x1)
+#define ecap_pss(e) (((e) >> 35) & 0x1f)
+#define ecap_eafs(e) (((e) >> 34) & 0x1)
+#define ecap_nwfs(e) (((e) >> 33) & 0x1)
+#define ecap_srs(e) (((e) >> 31) & 0x1)
+#define ecap_ers(e) (((e) >> 30) & 0x1)
+#define ecap_prs(e) (((e) >> 29) & 0x1)
+#define ecap_broken_pasid(e) (((e) >> 28) & 0x1)
+#define ecap_dis(e) (((e) >> 27) & 0x1)
+#define ecap_nest(e) (((e) >> 26) & 0x1)
+#define ecap_mts(e) (((e) >> 25) & 0x1)
+#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
+#define ecap_coherent(e) ((e) & 0x1)
+#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) (((e) >> 6) & 0x1)
+#define ecap_eim_support(e) (((e) >> 4) & 0x1)
+#define ecap_ir_support(e) (((e) >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
+#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
+#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
+
+/* Virtual command interface capability */
+#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
+
+/* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET 60
+#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
+#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
+#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
+#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
+#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
+#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
+#define DMA_TLB_IVT (((u64)1) << 63)
+#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_TLB_MAX_SIZE (0x3f)
+
+/* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET 61
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
+#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
+#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
+#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
+#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_ID_TLB_ADDR(addr) (addr)
+#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
+
+/* PMEN_REG */
+#define DMA_PMEN_EPM (((u32)1)<<31)
+#define DMA_PMEN_PRS (((u32)1)<<0)
+
+/* GCMD_REG */
+#define DMA_GCMD_TE (((u32)1) << 31)
+#define DMA_GCMD_SRTP (((u32)1) << 30)
+#define DMA_GCMD_SFL (((u32)1) << 29)
+#define DMA_GCMD_EAFL (((u32)1) << 28)
+#define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
+#define DMA_GCMD_CFI (((u32) 1) << 23)
+
+/* GSTS_REG */
+#define DMA_GSTS_TES (((u32)1) << 31)
+#define DMA_GSTS_RTPS (((u32)1) << 30)
+#define DMA_GSTS_FLS (((u32)1) << 29)
+#define DMA_GSTS_AFLS (((u32)1) << 28)
+#define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
+#define DMA_GSTS_CFIS (((u32)1) << 23)
+
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_SMT (((u64)1) << 10)
+
+/* CCMD_REG */
+#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
+#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
+#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
+#define DMA_CCMD_MASK_NOBIT 0
+#define DMA_CCMD_MASK_1BIT 1
+#define DMA_CCMD_MASK_2BIT 2
+#define DMA_CCMD_MASK_3BIT 3
+#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
+#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+
+/* FECTL_REG */
+#define DMA_FECTL_IM (((u32)1) << 31)
+
+/* FSTS_REG */
+#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
+#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
+#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
+#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
+#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
+#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
+
+/* FRCD_REG, 32 bits access */
+#define DMA_FRCD_F (((u32)1) << 31)
+#define dma_frcd_type(d) ((d >> 30) & 1)
+#define dma_frcd_fault_reason(c) (c & 0xff)
+#define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+#define DMA_PRS_PRO ((u32)2)
+
+#define DMA_VCS_PAS ((u64)1)
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+ cycles_t start_time = get_cycles(); \
+ while (1) { \
+ sts = op(iommu->reg + offset); \
+ if (cond) \
+ break; \
+ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
+ panic("DMAR hardware is malfunctioning\n"); \
+ cpu_relax(); \
+ } \
+} while (0)
+
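IOMMU_WAIT_OP() spins on a register read until the caller's condition holds, panicking after DMAR_OPERATION_TIMEOUT cycles. An illustrative use, polling the global status register until the Translation Enable Status bit latches; shown as a sketch of the pattern, not copied driver code:

    unsigned long flags;
    u32 sts;

    raw_spin_lock_irqsave(&iommu->register_lock, flags);
    writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

    /* Poll GSTS until hardware reports translation enabled. */
    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                  (sts & DMA_GSTS_TES), sts);
    raw_spin_unlock_irqrestore(&iommu->register_lock, flags);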
+#define QI_LENGTH 256 /* queue length */
+
+enum {
+ QI_FREE,
+ QI_IN_USE,
+ QI_DONE,
+ QI_ABORT
+};
+
+#define QI_CC_TYPE 0x1
+#define QI_IOTLB_TYPE 0x2
+#define QI_DIOTLB_TYPE 0x3
+#define QI_IEC_TYPE 0x4
+#define QI_IWD_TYPE 0x5
+#define QI_EIOTLB_TYPE 0x6
+#define QI_PC_TYPE 0x7
+#define QI_DEIOTLB_TYPE 0x8
+#define QI_PGRP_RESP_TYPE 0x9
+#define QI_PSTRM_RESP_TYPE 0xa
+
+#define QI_IEC_SELECTIVE (((u64)1) << 4)
+#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
+#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
+
+#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
+#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+#define QI_IWD_FENCE (((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
+
+#define QI_IOTLB_DID(did) (((u64)did) << 16)
+#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
+
+#define QI_CC_FM(fm) (((u64)fm) << 48)
+#define QI_CC_SID(sid) (((u64)sid) << 32)
+#define QI_CC_DID(did) (((u64)did) << 16)
+#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
+#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
+#define QI_PC_DID(did) (((u64)did) << 16)
+#define QI_PC_GRAN(gran) (((u64)gran) << 4)
+
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS 0
+#define QI_PC_PASID_SEL 1
+#define QI_PC_GLOBAL 3
+
+#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
+#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
+#define QI_EIOTLB_DID(did) (((u64)did) << 16)
+#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL 1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
+
+#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
+#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
+#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
+#define QI_DEV_EIOTLB_MAX_INVS 32
+
+/* Page group response descriptor QW0 */
+#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
+#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
+#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+
+/* Page group response descriptor QW1 */
+#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
+
+
+#define QI_RESP_SUCCESS 0x0
+#define QI_RESP_INVALID 0x1
+#define QI_RESP_FAILURE 0xf
+
+#define QI_GRAN_NONG_PASID 2
+#define QI_GRAN_PSI_PASID 3
+
+#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
+struct qi_desc {
+ u64 qw0;
+ u64 qw1;
+ u64 qw2;
+ u64 qw3;
+};
+
+struct q_inval {
+ raw_spinlock_t q_lock;
+ void *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+ int free_tail; /* last free entry */
+ int free_cnt;
+};
+
+struct dmar_pci_notify_info;
+
+#ifdef CONFIG_IRQ_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
+
+#define INTR_REMAP_TABLE_ENTRIES 65536
+
+struct irq_domain;
+
+struct ir_table {
+ struct irte *base;
+ unsigned long *bitmap;
+};
+
+void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
+#else
+static inline void
+intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
+#endif
+
+struct iommu_flush {
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+};
+
+enum {
+ SR_DMAR_FECTL_REG,
+ SR_DMAR_FEDATA_REG,
+ SR_DMAR_FEADDR_REG,
+ SR_DMAR_FEUADDR_REG,
+ MAX_SR_DMAR_REGS
+};
+
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE (1 << 2)
+
+extern int intel_iommu_sm;
+
+#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
+#define pasid_supported(iommu) (sm_supported(iommu) && \
+ ecap_pasid((iommu)->ecap))
+
+struct pasid_entry;
+struct pasid_state_entry;
+struct page_req_dsc;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: avail
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * When VT-d works in scalable mode, it allows DMA translation to
+ * happen through either the first-level or the second-level page
+ * table. This bit marks that DMA translation for the domain goes
+ * through the first-level page table; otherwise it goes through the
+ * second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
+
+struct iommu_domain_info {
+ struct intel_iommu *iommu;
+ unsigned int refcnt; /* Refcount of devices per iommu */
+ u16 did; /* Domain id used on this IOMMU. Use u16
+ * since domain ids are 16 bits wide per
+ * the VT-d spec, section 9.3 */
+};
+
+struct dmar_domain {
+ int nid; /* node id */
+ struct xarray iommu_array; /* Attached IOMMU array */
+
+ u8 has_iotlb_device: 1;
+ u8 iommu_coherency: 1; /* indicate coherency of iommu access */
+ u8 force_snooping : 1; /* Create IOPTEs with snoop control */
+ u8 set_pte_snp:1;
+
+ spinlock_t lock; /* Protect device tracking lists */
+ struct list_head devices; /* all devices' list */
+
+ struct dma_pte *pgd; /* virtual address */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+ int flags; /* flags to find out type of domain */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
+ u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
+};
+
+struct intel_iommu {
+ void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 reg_phys; /* physical address of hw register set */
+ u64 reg_size; /* size of hw register set */
+ u64 cap;
+ u64 ecap;
+ u64 vccap;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+ raw_spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+ int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
+ unsigned int irq, pr_irq;
+ u16 segment; /* PCI segment# */
+ unsigned char name[13]; /* Device Name */
+
+#ifdef CONFIG_INTEL_IOMMU
+ unsigned long *domain_ids; /* bitmap of domains */
+ unsigned long *copied_tables; /* bitmap of copied tables */
+ spinlock_t lock; /* protect context, domain ids */
+ struct root_entry *root_entry; /* virtual address */
+
+ struct iommu_flush flush;
+#endif
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ struct page_req_dsc *prq;
+ unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct completion prq_complete;
+ struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
+#endif
+ struct iopf_queue *iopf_queue;
+ unsigned char iopfq_name[16];
+ struct q_inval *qi; /* Queued invalidation info */
+ u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+
+#ifdef CONFIG_IRQ_REMAP
+ struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
+#endif
+ struct iommu_device iommu; /* IOMMU core code handle */
+ int node;
+ u32 flags; /* Software defined flags */
+
+ struct dmar_drhd_unit *drhd;
+ void *perf_statistic;
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ u32 segment; /* PCI segment number */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+ u8 pri_enabled:1;
+ u8 ats_supported:1;
+ u8 ats_enabled:1;
+ u8 ats_qdep;
+ struct device *dev; /* NULL for a PCIe-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
+ struct dmar_domain *domain; /* pointer to domain */
+ struct pasid_table *pasid_table; /* pasid table */
+};
+
+static inline void __iommu_flush_cache(
+ struct intel_iommu *iommu, void *addr, int size)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(addr, size);
+}
+
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
+/* Retrieve the domain ID which has allocated to the domain */
+static inline u16
+domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
+{
+ struct iommu_domain_info *info =
+ xa_load(&domain->iommu_array, iommu->seq_id);
+
+ return info->did;
+}
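domain_id_iommu() dereferences the xa_load() result without a NULL check, so it is only valid on a (domain, iommu) pair that is currently attached via domain_attach_iommu(). A guarded variant, purely illustrative and not part of the patch:

    static inline int domain_id_iommu_safe(struct dmar_domain *domain,
                                           struct intel_iommu *iommu)
    {
            struct iommu_domain_info *info =
                    xa_load(&domain->iommu_array, iommu->seq_id);

            return info ? info->did : -ENODEV;
    }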
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+ u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+ pte->val = 0;
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+ VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
+#endif
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+ return (pte->val & 3) != 0;
+}
+
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+ return (pte->val & DMA_PTE_LARGE_PAGE);
+}
+
+static inline bool first_pte_in_page(struct dma_pte *pte)
+{
+ return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
+}
+
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+ return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+ (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+}
+
+static inline bool context_present(struct context_entry *context)
+{
+ return (context->lo & 1);
+}
+
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void dmar_disable_qi(struct intel_iommu *iommu);
+extern int dmar_reenable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
+
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
+
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+ unsigned long npages, bool ih);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+ u32 pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN BIT(0)
+
+extern int dmar_ir_support(void);
+
+void *alloc_pgtable_page(int node);
+void free_pgtable_page(void *vaddr);
+void iommu_flush_write_buffer(struct intel_iommu *iommu);
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+extern void intel_svm_check(struct intel_iommu *iommu);
+extern int intel_svm_enable_prq(struct intel_iommu *iommu);
+extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+u32 intel_svm_get_pasid(struct iommu_sva *handle);
+int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
+ struct iommu_page_response *msg);
+
+struct intel_svm_dev {
+ struct list_head list;
+ struct rcu_head rcu;
+ struct device *dev;
+ struct intel_iommu *iommu;
+ struct iommu_sva sva;
+ unsigned long prq_seq_number;
+ u32 pasid;
+ int users;
+ u16 did;
+ u16 dev_iotlb:1;
+ u16 sid, qdep;
+};
+
+struct intel_svm {
+ struct mmu_notifier notifier;
+ struct mm_struct *mm;
+
+ unsigned int flags;
+ u32 pasid;
+ struct list_head devs;
+};
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+void intel_iommu_debugfs_init(void);
+#else
+static inline void intel_iommu_debugfs_init(void) {}
+#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+
+extern const struct attribute_group *intel_iommu_groups[];
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc);
+
+extern const struct iommu_ops intel_iommu_ops;
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
+#endif
+
+static inline const char *decode_prq_descriptor(char *str, size_t size,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3)
+{
+ char *buf = str;
+ int bytes;
+
+ bytes = snprintf(buf, size,
+ "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
+ FIELD_GET(GENMASK_ULL(31, 16), dw0),
+ FIELD_GET(GENMASK_ULL(63, 12), dw1),
+ dw1 & BIT_ULL(0) ? 'r' : '-',
+ dw1 & BIT_ULL(1) ? 'w' : '-',
+ dw0 & BIT_ULL(52) ? 'x' : '-',
+ dw0 & BIT_ULL(53) ? 'p' : '-',
+ dw1 & BIT_ULL(2) ? 'l' : '-',
+ FIELD_GET(GENMASK_ULL(51, 32), dw0),
+ FIELD_GET(GENMASK_ULL(11, 3), dw1));
+
+ /* Private Data */
+ if (dw0 & BIT_ULL(9)) {
+ size -= bytes;
+ buf += bytes;
+ snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
+ }
+
+ return str;
+}
+
+#endif
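decode_prq_descriptor() formats into a caller-supplied buffer and returns it, so it composes directly with printk. A usage sketch, where dw0..dw3 are assumed to hold the four quadwords of a page-request descriptor:

    char buf[160];

    pr_info("prq: %s\n",
            decode_prq_descriptor(buf, sizeof(buf), dw0, dw1, dw2, dw3));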
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index a67319597884..2e9683e970f8 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -10,7 +10,6 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
-#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
@@ -21,6 +20,7 @@
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "cap_audit.h"
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 17cad7c1f62d..c5e7e8b020a5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -12,13 +12,13 @@
#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
+#include "iommu.h"
#include "pasid.h"
/*
@@ -450,17 +450,17 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
struct pasid_entry *pte;
u16 did, pgtt;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
- return;
-
- if (!pasid_pte_is_present(pte))
+ if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return;
+ }
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
-
intel_pasid_clear_entry(dev, pasid, fault_ignore);
+ spin_unlock(&iommu->lock);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -496,22 +496,6 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
-static inline int pasid_enable_wpe(struct pasid_entry *pte)
-{
-#ifdef CONFIG_X86
- unsigned long cr0 = read_cr0();
-
- /* CR0.WP is normally set but just to be sure */
- if (unlikely(!(cr0 & X86_CR0_WP))) {
- pr_err_ratelimited("No CPU write protect!\n");
- return -EINVAL;
- }
-#endif
- pasid_set_wpe(pte);
-
- return 0;
-};
-
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -528,39 +512,52 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
+ if (flags & PASID_FLAG_SUPERVISOR_MODE) {
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ if (!ecap_srs(iommu->ecap)) {
+ pr_err("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ }
+
+ if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ pr_err("No 5-level paging support for first-level on %s\n",
+ iommu->name);
return -EINVAL;
+ }
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
/* Setup the first level page table pointer: */
pasid_set_flptr(pte, (u64)__pa(pgd));
if (flags & PASID_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap)) {
- pr_err("No supervisor request support on %s\n",
- iommu->name);
- return -EINVAL;
- }
pasid_set_sre(pte);
- if (pasid_enable_wpe(pte))
- return -EINVAL;
-
+ pasid_set_wpe(pte);
}
- if (flags & PASID_FLAG_FL5LP) {
- if (cap_5lp_support(iommu->cap)) {
- pasid_set_flpm(pte, 1);
- } else {
- pr_err("No 5-level paging support for first-level\n");
- pasid_clear_entry(pte);
- return -EINVAL;
- }
- }
+ if (flags & PASID_FLAG_FL5LP)
+ pasid_set_flpm(pte, 1);
if (flags & PASID_FLAG_PAGE_SNOOP)
pasid_set_pgsnp(pte);
@@ -572,6 +569,8 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -627,17 +626,19 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
}
pgd_val = virt_to_phys(pgd);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -654,6 +655,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
if (pasid != PASID_RID2PASID)
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -669,15 +672,17 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
u16 did = FLPT_DEFAULT_DID;
struct pasid_entry *pte;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -692,6 +697,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
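
Taken together, these pasid.c hunks converge on one locking shape: argument and capability checks move in front of the lock, the entry lookup, present check and population happen under iommu->lock, and cache flushing happens only after the lock is dropped. A userspace sketch of that shape, with pthreads and a toy table standing in for the real structures:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int table[16];	/* 0 == entry not present */

static int setup_entry(int idx, int val)
{
	if (idx < 0 || idx >= 16)	/* capability checks: before the lock */
		return -EINVAL;

	pthread_mutex_lock(&lock);
	if (table[idx]) {		/* present check: under the lock */
		pthread_mutex_unlock(&lock);
		return -EBUSY;
	}
	table[idx] = val;		/* populate: under the lock */
	pthread_mutex_unlock(&lock);

	printf("flush caches for entry %d\n", idx);	/* after unlock */
	return 0;
}

int main(void)
{
	printf("%d\n", setup_entry(3, 42));	/* 0 */
	printf("%d\n", setup_entry(3, 7));	/* -16, i.e. -EBUSY */
	return 0;
}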
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index bf5b937848b4..20c54e50f533 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -39,6 +39,7 @@
* only and pass-through transfer modes.
*/
#define FLPT_DEFAULT_DID 1
+#define NUM_RESERVED_DID 2
/*
* The SUPERVISOR_MODE flag indicates a first level translation which
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 0e8e03252d92..94ee70ac38e3 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -9,8 +9,8 @@
*/
#include <linux/spinlock.h>
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "perf.h"
static DEFINE_SPINLOCK(latency_lock);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 7ee37d996e15..8bcfb93dda56 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -5,7 +5,6 @@
* Authors: David Woodhouse <dwmw2@infradead.org>
*/
-#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -21,11 +20,12 @@
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva-lib.h"
+#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
@@ -328,9 +328,9 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
unsigned int flags)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long iflags, sflags;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
+ unsigned long sflags;
int ret = 0;
svm = pasid_private_find(mm->pasid);
@@ -394,11 +394,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
PASID_FLAG_SUPERVISOR_MODE : 0;
sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
- spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
FLPT_DEFAULT_DID, sflags);
- spin_unlock_irqrestore(&iommu->lock, iflags);
-
if (ret)
goto free_sdev;
@@ -544,7 +541,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
domain = info->domain;
pdev = to_pci_dev(dev);
sid = PCI_DEVID(info->bus, info->devfn);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
qdep = pci_ats_queue_depth(pdev);
/*
diff --git a/drivers/iommu/intel/trace.c b/drivers/iommu/intel/trace.c
index bfb6a6e37a88..117e626e3ea9 100644
--- a/drivers/iommu/intel/trace.c
+++ b/drivers/iommu/intel/trace.c
@@ -11,4 +11,4 @@
#include <linux/types.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/intel_iommu.h>
+#include "trace.h"
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
new file mode 100644
index 000000000000..93d96f93a89b
--- /dev/null
+++ b/drivers/iommu/intel/trace.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel IOMMU trace support
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_iommu
+
+#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_IOMMU_H
+
+#include <linux/tracepoint.h>
+
+#include "iommu.h"
+
+#define MSG_MAX 256
+
+TRACE_EVENT(qi_submit,
+ TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
+
+ TP_ARGS(iommu, qw0, qw1, qw2, qw3),
+
+ TP_STRUCT__entry(
+ __field(u64, qw0)
+ __field(u64, qw1)
+ __field(u64, qw2)
+ __field(u64, qw3)
+ __string(iommu, iommu->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(iommu, iommu->name);
+ __entry->qw0 = qw0;
+ __entry->qw1 = qw1;
+ __entry->qw2 = qw2;
+ __entry->qw3 = qw3;
+ ),
+
+ TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
+ __print_symbolic(__entry->qw0 & 0xf,
+ { QI_CC_TYPE, "cc_inv" },
+ { QI_IOTLB_TYPE, "iotlb_inv" },
+ { QI_DIOTLB_TYPE, "dev_tlb_inv" },
+ { QI_IEC_TYPE, "iec_inv" },
+ { QI_IWD_TYPE, "inv_wait" },
+ { QI_EIOTLB_TYPE, "p_iotlb_inv" },
+ { QI_PC_TYPE, "pc_inv" },
+ { QI_DEIOTLB_TYPE, "p_dev_tlb_inv" },
+ { QI_PGRP_RESP_TYPE, "page_grp_resp" }),
+ __get_str(iommu),
+ __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
+ )
+);
+
+TRACE_EVENT(prq_report,
+ TP_PROTO(struct intel_iommu *iommu, struct device *dev,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3,
+ unsigned long seq),
+
+ TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
+
+ TP_STRUCT__entry(
+ __field(u64, dw0)
+ __field(u64, dw1)
+ __field(u64, dw2)
+ __field(u64, dw3)
+ __field(unsigned long, seq)
+ __string(iommu, iommu->name)
+ __string(dev, dev_name(dev))
+ __dynamic_array(char, buff, MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __entry->dw0 = dw0;
+ __entry->dw1 = dw1;
+ __entry->dw2 = dw2;
+ __entry->dw3 = dw3;
+ __entry->seq = seq;
+ __assign_str(iommu, iommu->name);
+ __assign_str(dev, dev_name(dev));
+ ),
+
+ TP_printk("%s/%s seq# %ld: %s",
+ __get_str(iommu), __get_str(dev), __entry->seq,
+ decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
+ __entry->dw1, __entry->dw2, __entry->dw3)
+ )
+);
+#endif /* _TRACE_INTEL_IOMMU_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
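
The qi_submit event's __print_symbolic() maps the low nibble of qw0 to a descriptor-type name at trace-decode time. A standalone sketch of that decode — the numeric values here are assumptions matching the VT-d descriptor types, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

static const char *qi_type_name(uint64_t qw0)
{
	switch (qw0 & 0xf) {
	case 0x1: return "cc_inv";
	case 0x2: return "iotlb_inv";
	case 0x3: return "dev_tlb_inv";
	case 0x4: return "iec_inv";
	case 0x5: return "inv_wait";
	default:  return "unknown";
	}
}

int main(void)
{
	printf("%s\n", qi_type_name(0x2));	/* iotlb_inv */
	return 0;
}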
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index be066c1503d3..ba3115fd0f86 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -182,14 +182,8 @@ static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
(cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
}
-static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
- struct io_pgtable_cfg *cfg)
+static arm_v7s_iopte to_mtk_iopte(phys_addr_t paddr, arm_v7s_iopte pte)
{
- arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
-
- if (!arm_v7s_is_mtk_enabled(cfg))
- return pte;
-
if (paddr & BIT_ULL(32))
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
@@ -199,6 +193,17 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
return pte;
}
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+ if (arm_v7s_is_mtk_enabled(cfg))
+ return to_mtk_iopte(paddr, pte);
+
+ return pte;
+}
+
static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
struct io_pgtable_cfg *cfg)
{
@@ -240,10 +245,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dma_addr_t dma;
size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
+ gfp_t gfp_l1;
+
+ /*
+ * ARM_MTK_TTBR_EXT extends the translation table base to support a
+ * larger memory address.
+ */
+ gfp_l1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;
if (lvl == 1)
- table = (void *)__get_free_pages(
- __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
+ table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
@@ -251,7 +263,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
return NULL;
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys) {
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
@@ -457,9 +470,14 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
arm_v7s_iopte curr,
struct io_pgtable_cfg *cfg)
{
+ phys_addr_t phys = virt_to_phys(table);
arm_v7s_iopte old, new;
- new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
+ new = phys | ARM_V7S_PTE_TYPE_TABLE;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
+ new = to_mtk_iopte(phys, new);
+
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_V7S_ATTR_NS_TABLE;
@@ -779,6 +797,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
void *cookie)
{
struct arm_v7s_io_pgtable *data;
+ slab_flags_t slab_flag;
+ phys_addr_t paddr;
if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
@@ -788,7 +808,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_ARM_MTK_EXT))
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT |
+ IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
@@ -796,15 +817,27 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
return NULL;
+ if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
+ !arm_v7s_is_mtk_enabled(cfg))
+ return NULL;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
spin_lock_init(&data->split_lock);
+
+ /*
+ * ARM_MTK_TTBR_EXT extends the translation table base to support a
+ * larger memory address.
+ */
+ slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ 0 : ARM_V7S_TABLE_SLAB_FLAGS;
+
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SIZE(2, cfg),
- ARM_V7S_TABLE_SLAB_FLAGS, NULL);
+ slab_flag, NULL);
if (!data->l2_tables)
goto out_free_data;
@@ -850,12 +883,16 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
wmb();
/* TTBR */
- cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
- (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
+ paddr = virt_to_phys(data->pgd);
+ if (arm_v7s_is_mtk_enabled(cfg))
+ cfg->arm_v7s_cfg.ttbr = paddr | upper_32_bits(paddr);
+ else
+ cfg->arm_v7s_cfg.ttbr = paddr | ARM_V7S_TTBR_S |
+ (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
return &data->iop;
out_free_data:
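
The new to_mtk_iopte() exists because a 32-bit v7s PTE has no room for physical-address bits 32/33, so MediaTek parks them in spare attribute bits below the address field. A round-trip sketch in plain C — the bit positions 9 and 4 and the address mask are illustrative assumptions, not values lifted from the driver:

#include <assert.h>
#include <stdint.h>

#define MTK_PA_BIT32	(1u << 9)	/* assumed positions, below bit 12 */
#define MTK_PA_BIT33	(1u << 4)

static uint32_t to_mtk_iopte(uint64_t paddr, uint32_t pte)
{
	if (paddr & (1ULL << 32))
		pte |= MTK_PA_BIT32;
	if (paddr & (1ULL << 33))
		pte |= MTK_PA_BIT33;
	return pte;
}

static uint64_t from_mtk_iopte(uint32_t pte, uint32_t pa_mask)
{
	uint64_t paddr = pte & pa_mask;

	if (pte & MTK_PA_BIT32)
		paddr |= 1ULL << 32;
	if (pte & MTK_PA_BIT33)
		paddr |= 1ULL << 33;
	return paddr;
}

int main(void)
{
	uint64_t pa = 0x2ABCDE000ULL;	/* needs bit 33 */
	uint32_t pte = to_mtk_iopte(pa, pa & 0xfffff000u);

	assert(from_mtk_iopte(pte, 0xfffff000u) == pa);
	return 0;
}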
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 847ad47a2dfd..3a808146b50f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -259,7 +259,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
return 0;
out_release:
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
out_module_put:
module_put(ops->owner);
@@ -272,7 +273,7 @@ err_free:
int iommu_probe_device(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
struct iommu_group *group;
int ret;
@@ -313,6 +314,7 @@ int iommu_probe_device(struct device *dev)
mutex_unlock(&group->mutex);
iommu_group_put(group);
+ ops = dev_iommu_ops(dev);
if (ops->probe_finalize)
ops->probe_finalize(dev);
@@ -336,7 +338,8 @@ void iommu_release_device(struct device *dev)
iommu_device_unlink(dev->iommu->iommu_dev, dev);
ops = dev_iommu_ops(dev);
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
iommu_group_remove_device(dev);
module_put(ops->owner);
@@ -600,7 +603,7 @@ static void iommu_group_release(struct kobject *kobj)
if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
if (group->default_domain)
iommu_domain_free(group->default_domain);
@@ -641,7 +644,7 @@ struct iommu_group *iommu_group_alloc(void)
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
- ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
kfree(group);
return ERR_PTR(ret);
@@ -651,7 +654,7 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -2457,6 +2460,9 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
len = 0;
}
+ if (sg_is_dma_bus_address(sg))
+ goto next;
+
if (len) {
len += sg->length;
} else {
@@ -2464,6 +2470,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
start = s_phys;
}
+next:
if (++i < nents)
sg = sg_next(sg);
}
@@ -2576,32 +2583,25 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
ops->get_resv_regions(dev, list);
}
-void iommu_put_resv_regions(struct device *dev, struct list_head *list)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->put_resv_regions)
- ops->put_resv_regions(dev, list);
-}
-
/**
- * generic_iommu_put_resv_regions - Reserved region driver helper
+ * iommu_put_resv_regions - release reserved regions
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
- * IOMMU drivers can use this to implement their .put_resv_regions() callback
- * for simple reservations. Memory allocated for each reserved region will be
- * freed. If an IOMMU driver allocates additional resources per region, it is
- * going to have to implement a custom callback.
+ * This releases a reserved region list acquired by iommu_get_resv_regions().
*/
-void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, list, list)
- kfree(entry);
+ list_for_each_entry_safe(entry, next, list, list) {
+ if (entry->free)
+ entry->free(dev, entry);
+ else
+ kfree(entry);
+ }
}
-EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+EXPORT_SYMBOL(iommu_put_resv_regions);
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length, int prot,
@@ -2751,19 +2751,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
-
- if (ops->dev_feat_enabled)
- return ops->dev_feat_enabled(dev, feat);
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
@@ -3089,6 +3076,24 @@ out:
return ret;
}
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+ if (group->domain == group->default_domain)
+ return true;
+
+ /*
+ * If the default domain was set to identity and it is still an identity
+ * domain then we consider this a pass. This happens because of
+ * amd_iommu_init_device() replacing the default identity domain with an
+ * identity domain that has a different configuration for AMDGPU.
+ */
+ if (group->default_domain &&
+ group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+ group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+ return true;
+ return false;
+}
+
/**
* iommu_device_use_default_domain() - Device driver wants to handle device
* DMA through the kernel DMA API.
@@ -3107,8 +3112,7 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->domain != group->default_domain ||
- group->owner) {
+ if (group->owner || !iommu_is_default_domain(group)) {
ret = -EBUSY;
goto unlock_out;
}
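
The iommu_put_resv_regions() rework above replaces a driver-supplied put callback with a per-entry destructor: regions that allocated extra state set entry->free, everything else falls back to kfree(). The same shape in a self-contained userspace sketch (types and names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct region {
	struct region *next;
	void (*free)(struct region *);
};

static void odd_free(struct region *r)
{
	printf("custom free %p\n", (void *)r);
	free(r);
}

static void put_regions(struct region *head)
{
	struct region *r, *next;

	for (r = head; r; r = next) {
		next = r->next;		/* grab before the entry is freed */
		if (r->free)
			r->free(r);	/* per-entry destructor */
		else
			free(r);	/* plain fallback */
	}
}

int main(void)
{
	struct region *a = calloc(1, sizeof(*a));
	struct region *b = calloc(1, sizeof(*b));

	b->free = odd_free;
	a->next = b;
	put_regions(a);
	return 0;
}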
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index db77aa675145..47d1983dfa2a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -26,6 +26,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
struct iova_domain *iovad;
@@ -614,7 +619,12 @@ EXPORT_SYMBOL_GPL(reserve_iova);
* dynamic size tuning described in the paper.
*/
-#define IOVA_MAG_SIZE 128
+/*
+ * As kmalloc's buffer size is fixed to a power of 2, 127 is chosen so
+ * that 'iova_magazine' comes out to exactly 1024 bytes and no slab
+ * memory is wasted.
+ */
+#define IOVA_MAG_SIZE 127
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
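
The arithmetic behind the new IOVA_MAG_SIZE comment, assuming the upstream layout of struct iova_magazine (one unsigned long count plus the pfns array): on an LP64 target, 8 + 127 * 8 = 1024 bytes, which fills a kmalloc-1024 slab exactly, whereas 128 entries would spill into the 2048-byte bucket. Standalone check:

#include <stdio.h>

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[127];	/* IOVA_MAG_SIZE */
};

int main(void)
{
	printf("sizeof(struct iova_magazine) = %zu\n",
	       sizeof(struct iova_magazine));	/* 1024 on LP64 */
	return 0;
}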
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f09aedfdd462..6a24aa804ea3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -394,10 +394,6 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void msm_iommu_release_device(struct device *dev)
-{
-}
-
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -603,7 +599,7 @@ static int insert_iommu_master(struct device *dev,
for (sid = 0; sid < master->num_mids; sid++)
if (master->mids[sid] == spec->args[0]) {
- dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
sid);
return 0;
}
@@ -677,7 +673,6 @@ fail:
static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
.probe_device = msm_iommu_probe_device,
- .release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bb9dd92c9898..7e363b1f24df 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -34,7 +34,6 @@
#include <dt-bindings/memory/mtk-memory-port.h>
#define REG_MMU_PT_BASE_ADDR 0x000
-#define MMU_PT_ADDR_MASK GENMASK(31, 7)
#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
@@ -138,6 +137,7 @@
/* PM and clock always on. e.g. infra iommu */
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
+#define PGTABLE_PA_35_EN BIT(17)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -596,6 +596,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
.iommu_dev = data->dev,
};
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
+ dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;
+
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
dom->cfg.oas = data->enable_4GB ? 33 : 32;
else
@@ -684,8 +687,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
goto err_unlock;
}
bank->m4u_dom = dom;
- writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- bank->base + REG_MMU_PT_BASE_ADDR);
+ writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);
pm_runtime_put(m4udev);
}
@@ -819,17 +821,12 @@ static void mtk_iommu_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return;
-
data = dev_iommu_priv_get(dev);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
}
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
@@ -933,7 +930,6 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.get_resv_regions = mtk_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -1140,22 +1136,32 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
- switch (data->plat_data->m4u_plat) {
- case M4U_MT2712:
- p = "mediatek,mt2712-infracfg";
- break;
- case M4U_MT8173:
- p = "mediatek,mt8173-infracfg";
- break;
- default:
- p = NULL;
+ infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg");
+ if (IS_ERR(infracfg)) {
+ /*
+ * Legacy devicetrees will not specify a phandle to
+ * mediatek,infracfg: in that case, we use the older
+ * way to retrieve a syscon to infra.
+ *
+ * This is for backwards compatibility only, hence
+ * no more compatibles shall be added to this list.
+ */
+ switch (data->plat_data->m4u_plat) {
+ case M4U_MT2712:
+ p = "mediatek,mt2712-infracfg";
+ break;
+ case M4U_MT8173:
+ p = "mediatek,mt8173-infracfg";
+ break;
+ default:
+ p = NULL;
+ }
+
+ infracfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(infracfg))
+ return PTR_ERR(infracfg);
}
- infracfg = syscon_regmap_lookup_by_compatible(p);
-
- if (IS_ERR(infracfg))
- return PTR_ERR(infracfg);
-
ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
if (ret)
return ret;
@@ -1204,18 +1210,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = mtk_iommu_mm_dts_parse(dev, &match, data);
if (ret) {
- dev_err(dev, "mm dts parse fail(%d).", ret);
+ dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- data->plat_data->pericfg_comp_str) {
- infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str);
- if (IS_ERR(infracfg)) {
- ret = PTR_ERR(infracfg);
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ p = data->plat_data->pericfg_comp_str;
+ data->pericfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(data->pericfg)) {
+ ret = PTR_ERR(data->pericfg);
goto out_runtime_disable;
}
-
- data->pericfg = infracfg;
}
platform_set_drvdata(pdev, data);
@@ -1366,8 +1370,7 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- base + REG_MMU_PT_BASE_ADDR);
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
} while (++i < data->plat_data->banks_num);
/*
@@ -1401,7 +1404,7 @@ static const struct mtk_iommu_plat_data mt2712_data = {
static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
.flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
- MTK_IOMMU_TYPE_MM,
+ MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
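
The probe rework above prefers an explicit mediatek,infracfg phandle and only falls back to the fixed compatible table for legacy devicetrees. The lookup-with-fallback shape, reduced to a self-contained sketch (all names illustrative):

#include <stdio.h>
#include <string.h>

static const char *lookup_by_phandle(const char *node)
{
	(void)node;
	return NULL;	/* pretend this DT has no mediatek,infracfg */
}

static const char *lookup_legacy(const char *plat)
{
	/* frozen legacy table: new platforms must use the phandle */
	if (!strcmp(plat, "mt2712"))
		return "mediatek,mt2712-infracfg";
	if (!strcmp(plat, "mt8173"))
		return "mediatek,mt8173-infracfg";
	return NULL;
}

int main(void)
{
	const char *cfg = lookup_by_phandle("iommu@10205000");

	if (!cfg)
		cfg = lookup_legacy("mt8173");
	printf("%s\n", cfg ? cfg : "(none)");
	return 0;
}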
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e1cb51b9866c..128c7a3f1778 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -532,15 +532,10 @@ static void mtk_iommu_v1_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
- return;
-
data = dev_iommu_priv_get(dev);
larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 41f4eb005219..5696314ae69e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -40,7 +40,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return -ENODEV;
+ return driver_deferred_probe_check_state(dev);
if (!try_module_get(ops->owner))
return -ENODEV;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index bd409bab6286..511959c8a14d 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -383,16 +383,6 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static void sprd_iommu_release_device(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec || fwspec->ops != &sprd_iommu_ops)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static struct iommu_group *sprd_iommu_device_group(struct device *dev)
{
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
@@ -417,7 +407,6 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
.probe_device = sprd_iommu_probe_device,
- .release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index c54ab477b8fd..a84c63518773 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -738,8 +738,6 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void sun50i_iommu_release_device(struct device *dev) {}
-
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
@@ -764,7 +762,6 @@ static const struct iommu_ops sun50i_iommu_ops = {
.domain_alloc = sun50i_iommu_domain_alloc,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
- .release_device = sun50i_iommu_release_device,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index a6700a40a6f8..e5ca3cf1a949 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -246,10 +246,6 @@ static struct iommu_device *gart_iommu_probe_device(struct device *dev)
return &gart_handle->iommu;
}
-static void gart_iommu_release_device(struct device *dev)
-{
-}
-
static int gart_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -273,7 +269,6 @@ static void gart_iommu_sync(struct iommu_domain *domain,
static const struct iommu_ops gart_iommu_ops = {
.domain_alloc = gart_iommu_domain_alloc,
.probe_device = gart_iommu_probe_device,
- .release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1fea68e551f1..2a8de975fe63 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -864,8 +864,6 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
return &smmu->iommu;
}
-static void tegra_smmu_release_device(struct device *dev) {}
-
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
@@ -966,7 +964,6 @@ static int tegra_smmu_of_xlate(struct device *dev,
static const struct iommu_ops tegra_smmu_ops = {
.domain_alloc = tegra_smmu_domain_alloc,
.probe_device = tegra_smmu_probe_device,
- .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa0..80151176ba12 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
-static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
int ret;
u32 flags;
+ size_t size = pgsize * pgcount;
u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
viommu_del_mappings(vdomain, iova, end);
+ else if (mapped)
+ *mapped = size;
return ret;
}
-static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
int ret = 0;
size_t unmapped;
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
+ size_t size = pgsize * pgcount;
unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
@@ -964,7 +970,7 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
return &viommu->iommu;
err_free_dev:
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
return ERR_PTR(ret);
@@ -981,15 +987,9 @@ static void viommu_probe_finalize(struct device *dev)
static void viommu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev;
-
- if (!fwspec || fwspec->ops != &viommu_ops)
- return;
-
- vdev = dev_iommu_priv_get(dev);
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -1006,20 +1006,30 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static bool viommu_capable(enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct iommu_ops viommu_ops = {
+ .capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
- .map = viommu_map,
- .unmap = viommu_unmap,
+ .map_pages = viommu_map_pages,
+ .unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
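
Switching viommu to map_pages()/unmap_pages() means one callback invocation covers pgsize * pgcount bytes and reports progress through *mapped, so the core knows exactly how much to unwind on partial failure. A stub of that contract (names illustrative, not the driver's code):

#include <stdio.h>
#include <stddef.h>

static int map_pages(unsigned long iova, unsigned long paddr,
		     size_t pgsize, size_t pgcount, size_t *mapped)
{
	size_t size = pgsize * pgcount;

	/* a real driver sends one map request for [iova, iova + size) */
	printf("map [0x%lx, 0x%lx) -> 0x%lx\n", iova, iova + size, paddr);
	if (mapped)
		*mapped = size;	/* all-or-nothing in this sketch */
	return 0;
}

int main(void)
{
	size_t mapped = 0;

	map_pages(0x100000, 0x200000, 4096, 16, &mapped);
	printf("mapped %zu bytes\n", mapped);	/* 65536 */
	return 0;
}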
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 327f3ab62c03..741612ba6a52 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -129,7 +129,7 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
- cpuintc_handle = irq_domain_alloc_fwnode(NULL);
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 80d8ca6f2d46..16e9af8d8b1e 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -111,11 +111,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
/* Mask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
/* Set route for target vector */
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
/* Unmask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -286,7 +290,7 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
}
}
-struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
int i;
@@ -344,7 +348,8 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
if (!priv)
return -ENOMEM;
- priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
if (!priv->domain_handle) {
pr_err("Unable to allocate domain handle\n");
goto out_free_priv;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index c4f3c886ad61..0da8716f8f24 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -207,7 +207,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
"reg-names", core_reg_names[i]);
if (index < 0)
- return -EINVAL;
+ goto out_iounmap;
priv->core_isr[i] = of_iomap(node, index);
}
@@ -360,7 +360,7 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index d0e8551bebfa..a72ede90ffc6 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -282,7 +282,7 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
int ret;
struct fwnode_handle *domain_handle;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
acpi_pchmsi->count, parent, domain_handle);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index b6f1392964b1..c01b9c257005 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -48,25 +48,6 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-int find_pch_pic(u32 gsi)
-{
- int i;
-
- /* Find the PCH_PIC that manages this GSI. */
- for (i = 0; i < MAX_IO_PICS; i++) {
- struct pch_pic *priv = pch_pic_priv[i];
-
- if (!priv)
- return -1;
-
- if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
- return i;
- }
-
- pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
- return -1;
-}
-
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -325,6 +306,25 @@ IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
#endif
#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
@@ -349,7 +349,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index b65bd8878d4f..499e5f81b3fe 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -95,10 +95,11 @@ static const struct irq_domain_ops riscv_intc_domain_ops = {
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc, hartid;
+ int rc;
+ unsigned long hartid;
- hartid = riscv_of_parent_hartid(node);
- if (hartid < 0) {
+ rc = riscv_of_parent_hartid(node, &hartid);
+ if (rc < 0) {
pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index ba4938188570..2f4784860df5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -374,7 +374,8 @@ static int __init __plic_init(struct device_node *node,
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
irq_hw_number_t hwirq;
- int cpu, hartid;
+ int cpu;
+ unsigned long hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -398,8 +399,8 @@ static int __init __plic_init(struct device_node *node,
continue;
}
- hartid = riscv_of_parent_hartid(parent.np);
- if (hartid < 0) {
+ error = riscv_of_parent_hartid(parent.np, &hartid);
+ if (error < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
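
Both RISC-V hunks fix the same bug class: a hart ID may legitimately occupy all bits of an unsigned long, so it cannot double as its own negative error code in an int. The replacement API returns a status and hands the ID back through a pointer, as in this sketch:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

static int parse_hartid(const char *node, unsigned long *out)
{
	if (!node)
		return -ENOENT;
	*out = ULONG_MAX;	/* a valid ID that a signed int would mangle */
	return 0;
}

int main(void)
{
	unsigned long hartid;

	if (parse_hartid("cpu@0", &hartid) < 0)
		return 1;
	printf("hartid = %lu\n", hartid);
	return 0;
}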
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e1f771c72fc4..ad3e2c1b3c87 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
lic->base[i] = base;
/* Disable all interrupts */
- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
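
The tegra substitution is about stating the width explicitly: on a 64-bit build ~0ul is 0xffffffffffffffff and only works because writel() truncates its argument to 32 bits, while GENMASK(31, 0) names the intended 32-bit mask outright. A standalone check:

#include <stdio.h>

#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

int main(void)
{
	printf("~0ul          = %#lx\n", ~0ul);		/* 0xffffffffffffffff on LP64 */
	printf("GENMASK(31,0) = %#lx\n", GENMASK(31, 0));	/* 0xffffffff everywhere */
	return 0;
}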
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a49979f41eee..499d0f215a8b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -447,16 +447,16 @@ config LEDS_LP8860
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS && BROKEN
depends on X86 && SERIO_I8042 && DMI
help
This driver makes the mail LED accessible from userspace
- programs through the leds subsystem. This LED have three
- known mode: off, blink at 0.5Hz and blink at 1Hz.
+ programs through the LEDs subsystem. This LED has three
+ known modes: off, blink at 0.5Hz and blink at 1Hz.
The driver supports two kinds of interface: using ledtrig-timer
or through /sys/class/leds/clevo::mail/brightness. As this LED
- cannot change it's brightness it blinks instead. The brightness
+ cannot change its brightness it blinks instead. The brightness
value 0 means off, 1..127 means blink at 0.5Hz and 128..255 means
blink at 1Hz.
@@ -697,7 +697,7 @@ config LEDS_MENF21BMC
config LEDS_IS31FL319X
tristate "LED Support for ISSI IS31FL319x I2C LED controller family"
- depends on LEDS_CLASS && I2C && OF
+ depends on LEDS_CLASS && I2C
select REGMAP_I2C
help
This option enables support for LEDs connected to ISSI IS31FL319x
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 59ba81e40e85..945c84286a4e 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,3 +1,17 @@
+config LEDS_BCM63138
+ tristate "LED Support for Broadcom BCM63138 SoC"
+ depends on LEDS_CLASS
+ depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ default ARCH_BCM4908
+ help
+ This option enables support for the LED controller that is part of
+ the BCM63138 SoC. The same hardware block is also known to be used
+ in BCM4908, BCM6848, BCM6858, BCM63148, BCM63381 and BCM68360.
+
+ If compiled as a module, it will be called leds-bcm63138.
+
config LEDS_LGM
tristate "LED support for LGM SoC series"
depends on X86 || COMPILE_TEST
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
index fa5d04dccf13..447029f4153a 100644
--- a/drivers/leds/blink/Makefile
+++ b/drivers/leds/blink/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_BCM63138) += leds-bcm63138.o
obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-bcm63138.c b/drivers/leds/blink/leds-bcm63138.c
new file mode 100644
index 000000000000..2cf2761e4914
--- /dev/null
+++ b/drivers/leds/blink/leds-bcm63138.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define BCM63138_MAX_LEDS 32
+#define BCM63138_MAX_BRIGHTNESS 9
+
+#define BCM63138_LED_BITS 4 /* how many bits control a single LED */
+#define BCM63138_LED_MASK ((1 << BCM63138_LED_BITS) - 1) /* 0xf */
+#define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */
+
+#define BCM63138_GLB_CTRL 0x00
+#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002
+#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008
+#define BCM63138_MASK 0x04
+#define BCM63138_HW_LED_EN 0x08
+#define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c
+#define BCM63138_FLASH_RATE_CTRL1 0x10
+#define BCM63138_FLASH_RATE_CTRL2 0x14
+#define BCM63138_FLASH_RATE_CTRL3 0x18
+#define BCM63138_FLASH_RATE_CTRL4 0x1c
+#define BCM63138_BRIGHT_CTRL1 0x20
+#define BCM63138_BRIGHT_CTRL2 0x24
+#define BCM63138_BRIGHT_CTRL3 0x28
+#define BCM63138_BRIGHT_CTRL4 0x2c
+#define BCM63138_POWER_LED_CFG 0x30
+#define BCM63138_HW_POLARITY 0xb4
+#define BCM63138_SW_DATA 0xb8
+#define BCM63138_SW_POLARITY 0xbc
+#define BCM63138_PARALLEL_LED_POLARITY 0xc0
+#define BCM63138_SERIAL_LED_POLARITY 0xc4
+#define BCM63138_HW_LED_STATUS 0xc8
+#define BCM63138_FLASH_CTRL_STATUS 0xcc
+#define BCM63138_FLASH_BRT_CTRL 0xd0
+#define BCM63138_FLASH_P_LED_OUT_STATUS 0xd4
+#define BCM63138_FLASH_S_LED_OUT_STATUS 0xd8
+
+struct bcm63138_leds {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+struct bcm63138_led {
+ struct bcm63138_leds *leds;
+ struct led_classdev cdev;
+ u32 pin;
+ bool active_low;
+};
+
+/*
+ * I/O access
+ */
+
+static void bcm63138_leds_write(struct bcm63138_leds *leds, unsigned int reg,
+ u32 data)
+{
+ writel(data, leds->base + reg);
+}
+
+static unsigned long bcm63138_leds_read(struct bcm63138_leds *leds,
+ unsigned int reg)
+{
+ return readl(leds->base + reg);
+}
+
+static void bcm63138_leds_update_bits(struct bcm63138_leds *leds,
+ unsigned int reg, u32 mask, u32 val)
+{
+ WARN_ON(val & ~mask);
+
+ bcm63138_leds_write(leds, reg, (bcm63138_leds_read(leds, reg) & ~mask) | (val & mask));
+}
+
+/*
+ * Helpers
+ */
+
+static void bcm63138_leds_set_flash_rate(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_FLASH_RATE_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_set_bright(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_BRIGHT_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_enable_led(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ enum led_brightness value)
+{
+ u32 bit = BIT(led->pin);
+
+ bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, value ? bit : 0);
+}
+
+/*
+ * API callbacks
+ */
+
+static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, value);
+ if (!value)
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ else
+ bcm63138_leds_set_bright(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+}
+
+static int bcm63138_leds_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+ u8 value;
+
+ if (!*delay_on && !*delay_off) {
+ *delay_on = 640;
+ *delay_off = 640;
+ }
+
+ if (*delay_on != *delay_off) {
+ dev_dbg(led_cdev->dev, "Blinking at unequal delays is not supported\n");
+ return -EINVAL;
+ }
+
+ switch (*delay_on) {
+ case 1152 ... 1408: /* 1280 ms ± 10% */
+ value = 0x7;
+ break;
+ case 576 ... 704: /* 640 ms ± 10% */
+ value = 0x6;
+ break;
+ case 288 ... 352: /* 320 ms ± 10% */
+ value = 0x5;
+ break;
+ case 126 ... 154: /* 140 ms ± 10% */
+ value = 0x4;
+ break;
+ case 59 ... 72: /* 65 ms ± 10% */
+ value = 0x3;
+ break;
+ default:
+ dev_dbg(led_cdev->dev, "Blinking delay value %lu is unsupported\n",
+ *delay_on);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS);
+ bcm63138_leds_set_flash_rate(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+
+ return 0;
+}
+
+/*
+ * LED driver
+ */
+
+static void bcm63138_leds_create_led(struct bcm63138_leds *leds,
+ struct device_node *np)
+{
+ struct led_init_data init_data = {
+ .fwnode = of_fwnode_handle(np),
+ };
+ struct device *dev = leds->dev;
+ struct bcm63138_led *led;
+ struct pinctrl *pinctrl;
+ u32 bit;
+ int err;
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "Failed to alloc LED\n");
+ return;
+ }
+
+ led->leds = leds;
+
+ if (of_property_read_u32(np, "reg", &led->pin)) {
+ dev_err(dev, "Missing \"reg\" property in %pOF\n", np);
+ goto err_free;
+ }
+
+ if (led->pin >= BCM63138_MAX_LEDS) {
+ dev_err(dev, "Invalid \"reg\" value %d\n", led->pin);
+ goto err_free;
+ }
+
+ led->active_low = of_property_read_bool(np, "active-low");
+
+ led->cdev.max_brightness = BCM63138_MAX_BRIGHTNESS;
+ led->cdev.brightness_set = bcm63138_leds_brightness_set;
+ led->cdev.blink_set = bcm63138_leds_blink_set;
+
+ err = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (err) {
+ dev_err(dev, "Failed to register LED %pOF: %d\n", np, err);
+ goto err_free;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(led->cdev.dev);
+ if (IS_ERR(pinctrl) && PTR_ERR(pinctrl) != -ENODEV) {
+ dev_warn(led->cdev.dev, "Failed to select %pOF pinctrl: %ld\n",
+ np, PTR_ERR(pinctrl));
+ }
+
+ bit = BIT(led->pin);
+ bcm63138_leds_update_bits(leds, BCM63138_PARALLEL_LED_POLARITY, bit,
+ led->active_low ? 0 : bit);
+ bcm63138_leds_update_bits(leds, BCM63138_HW_LED_EN, bit, 0);
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ bcm63138_leds_enable_led(leds, led, led->cdev.brightness);
+
+ return;
+
+err_free:
+ devm_kfree(dev, led);
+}
+
+static int bcm63138_leds_probe(struct platform_device *pdev)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bcm63138_leds *leds;
+ struct device_node *child;
+
+ leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds->dev = dev;
+
+ leds->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(leds->base))
+ return PTR_ERR(leds->base);
+
+ spin_lock_init(&leds->lock);
+
+ bcm63138_leds_write(leds, BCM63138_GLB_CTRL,
+ BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL |
+ BCM63138_GLB_CTRL_SERIAL_LED_EN_POL);
+ bcm63138_leds_write(leds, BCM63138_HW_LED_EN, 0);
+ bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0);
+ bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0);
+
+ for_each_available_child_of_node(np, child) {
+ bcm63138_leds_create_led(leds, child);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm63138_leds_of_match_table[] = {
+ { .compatible = "brcm,bcm63138-leds", },
+ { },
+};
+
+static struct platform_driver bcm63138_leds_driver = {
+ .probe = bcm63138_leds_probe,
+ .driver = {
+ .name = "leds-bcm63xxx",
+ .of_match_table = bcm63138_leds_of_match_table,
+ },
+};
+
+module_platform_driver(bcm63138_leds_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table);
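
bcm63138_leds_set_flash_rate() and bcm63138_leds_set_bright() above pack eight 4-bit LED fields per 32-bit register; the driver's fls()-based shift is a division in disguise, since fls(7) == 3 makes pin >> 3 the same as pin / 8. A standalone check of the offset/shift math in its division form:

#include <stdio.h>

#define LED_BITS	4
#define LEDS_PER_REG	(32 / LED_BITS)	/* 8 */

int main(void)
{
	unsigned int pin;

	for (pin = 0; pin < 18; pin += 7) {
		unsigned int reg_offset = (pin / LEDS_PER_REG) * 4;
		unsigned int shift = (pin % LEDS_PER_REG) * LED_BITS;

		printf("pin %2u -> reg +0x%02x, shift %2u\n",
		       pin, reg_offset, shift);
	}
	return 0;	/* pin 7 -> +0x00/28, pin 14 -> +0x04/24 */
}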
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 4161b9dd7e48..52b59b62f437 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -11,9 +11,9 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -21,39 +21,64 @@
/* register numbers */
#define IS31FL319X_SHUTDOWN 0x00
-#define IS31FL319X_CTRL1 0x01
-#define IS31FL319X_CTRL2 0x02
-#define IS31FL319X_CONFIG1 0x03
-#define IS31FL319X_CONFIG2 0x04
-#define IS31FL319X_RAMP_MODE 0x05
-#define IS31FL319X_BREATH_MASK 0x06
-#define IS31FL319X_PWM(channel) (0x07 + channel)
-#define IS31FL319X_DATA_UPDATE 0x10
-#define IS31FL319X_T0(channel) (0x11 + channel)
-#define IS31FL319X_T123_1 0x1a
-#define IS31FL319X_T123_2 0x1b
-#define IS31FL319X_T123_3 0x1c
-#define IS31FL319X_T4(channel) (0x1d + channel)
-#define IS31FL319X_TIME_UPDATE 0x26
-#define IS31FL319X_RESET 0xff
-
-#define IS31FL319X_REG_CNT (IS31FL319X_RESET + 1)
+
+/* registers for 3190, 3191 and 3193 */
+#define IS31FL3190_BREATHING 0x01
+#define IS31FL3190_LEDMODE 0x02
+#define IS31FL3190_CURRENT 0x03
+#define IS31FL3190_PWM(channel) (0x04 + channel)
+#define IS31FL3190_DATA_UPDATE 0x07
+#define IS31FL3190_T0(channel) (0x0a + channel)
+#define IS31FL3190_T1T2(channel) (0x10 + channel)
+#define IS31FL3190_T3T4(channel) (0x16 + channel)
+#define IS31FL3190_TIME_UPDATE 0x1c
+#define IS31FL3190_LEDCONTROL 0x1d
+#define IS31FL3190_RESET 0x2f
+
+#define IS31FL3190_CURRENT_uA_MIN 5000
+#define IS31FL3190_CURRENT_uA_DEFAULT 42000
+#define IS31FL3190_CURRENT_uA_MAX 42000
+#define IS31FL3190_CURRENT_MASK GENMASK(4, 2)
+#define IS31FL3190_CURRENT_5_mA 0x02
+#define IS31FL3190_CURRENT_10_mA 0x01
+#define IS31FL3190_CURRENT_17dot5_mA 0x04
+#define IS31FL3190_CURRENT_30_mA 0x03
+#define IS31FL3190_CURRENT_42_mA 0x00
+
+/* registers for 3196 and 3199 */
+#define IS31FL3196_CTRL1 0x01
+#define IS31FL3196_CTRL2 0x02
+#define IS31FL3196_CONFIG1 0x03
+#define IS31FL3196_CONFIG2 0x04
+#define IS31FL3196_RAMP_MODE 0x05
+#define IS31FL3196_BREATH_MARK 0x06
+#define IS31FL3196_PWM(channel) (0x07 + channel)
+#define IS31FL3196_DATA_UPDATE 0x10
+#define IS31FL3196_T0(channel) (0x11 + channel)
+#define IS31FL3196_T123_1 0x1a
+#define IS31FL3196_T123_2 0x1b
+#define IS31FL3196_T123_3 0x1c
+#define IS31FL3196_T4(channel) (0x1d + channel)
+#define IS31FL3196_TIME_UPDATE 0x26
+#define IS31FL3196_RESET 0xff
+
+#define IS31FL3196_REG_CNT (IS31FL3196_RESET + 1)
#define IS31FL319X_MAX_LEDS 9
/* CS (Current Setting) in CONFIG2 register */
-#define IS31FL319X_CONFIG2_CS_SHIFT 4
-#define IS31FL319X_CONFIG2_CS_MASK 0x7
-#define IS31FL319X_CONFIG2_CS_STEP_REF 12
+#define IS31FL3196_CONFIG2_CS_SHIFT 4
+#define IS31FL3196_CONFIG2_CS_MASK GENMASK(2, 0)
+#define IS31FL3196_CONFIG2_CS_STEP_REF 12
-#define IS31FL319X_CURRENT_MIN ((u32)5000)
-#define IS31FL319X_CURRENT_MAX ((u32)40000)
-#define IS31FL319X_CURRENT_STEP ((u32)5000)
-#define IS31FL319X_CURRENT_DEFAULT ((u32)20000)
+#define IS31FL3196_CURRENT_uA_MIN 5000
+#define IS31FL3196_CURRENT_uA_MAX 40000
+#define IS31FL3196_CURRENT_uA_STEP 5000
+#define IS31FL3196_CURRENT_uA_DEFAULT 20000
/* Audio gain in CONFIG2 register */
-#define IS31FL319X_AUDIO_GAIN_DB_MAX ((u32)21)
-#define IS31FL319X_AUDIO_GAIN_DB_STEP ((u32)3)
+#define IS31FL3196_AUDIO_GAIN_DB_MAX ((u32)21)
+#define IS31FL3196_AUDIO_GAIN_DB_STEP 3
/*
* regmap is used as a cache of chip's register space,
@@ -78,52 +103,161 @@ struct is31fl319x_chip {
struct is31fl319x_chipdef {
int num_leds;
+ u8 reset_reg;
+ const struct regmap_config *is31fl319x_regmap_config;
+ int (*brightness_set)(struct led_classdev *cdev, enum led_brightness brightness);
+ u32 current_default;
+ u32 current_min;
+ u32 current_max;
+ bool is_3196or3199;
};
-static const struct is31fl319x_chipdef is31fl3190_cdef = {
- .num_leds = 1,
-};
+static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* we have no readable registers */
+ return false;
+}
-static const struct is31fl319x_chipdef is31fl3193_cdef = {
- .num_leds = 3,
+static bool is31fl3190_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3190_DATA_UPDATE:
+ case IS31FL3190_TIME_UPDATE:
+ case IS31FL3190_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3190_reg_defaults[] = {
+ { IS31FL3190_LEDMODE, 0x00 },
+ { IS31FL3190_CURRENT, 0x00 },
+ { IS31FL3190_PWM(0), 0x00 },
+ { IS31FL3190_PWM(1), 0x00 },
+ { IS31FL3190_PWM(2), 0x00 },
};
-static const struct is31fl319x_chipdef is31fl3196_cdef = {
- .num_leds = 6,
+static struct regmap_config is31fl3190_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3190_RESET,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3190_volatile_reg,
+ .reg_defaults = is31fl3190_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3190_reg_defaults),
};
-static const struct is31fl319x_chipdef is31fl3199_cdef = {
- .num_leds = 9,
+static bool is31fl3196_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3196_DATA_UPDATE:
+ case IS31FL3196_TIME_UPDATE:
+ case IS31FL3196_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3196_reg_defaults[] = {
+ { IS31FL3196_CONFIG1, 0x00 },
+ { IS31FL3196_CONFIG2, 0x00 },
+ { IS31FL3196_PWM(0), 0x00 },
+ { IS31FL3196_PWM(1), 0x00 },
+ { IS31FL3196_PWM(2), 0x00 },
+ { IS31FL3196_PWM(3), 0x00 },
+ { IS31FL3196_PWM(4), 0x00 },
+ { IS31FL3196_PWM(5), 0x00 },
+ { IS31FL3196_PWM(6), 0x00 },
+ { IS31FL3196_PWM(7), 0x00 },
+ { IS31FL3196_PWM(8), 0x00 },
};
-static const struct of_device_id of_is31fl319x_match[] = {
- { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
- { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
- { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
- { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
- { }
+static struct regmap_config is31fl3196_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3196_REG_CNT,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3196_volatile_reg,
+ .reg_defaults = is31fl3196_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3196_reg_defaults),
};
-MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
-static int is31fl319x_brightness_set(struct led_classdev *cdev,
+static int is31fl3190_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
+ struct is31fl319x_chip *is31 = led->chip;
+ int chan = led - is31->leds;
+ int ret;
+ int i;
+ u8 ctrl = 0;
+
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
+
+ mutex_lock(&is31->lock);
+
+ /* update PWM register */
+ ret = regmap_write(is31->regmap, IS31FL3190_PWM(chan), brightness);
+ if (ret < 0)
+ goto out;
+
+ /* read current brightness of all PWM channels */
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ unsigned int pwm_value;
+ bool on;
+
+ /*
+ * since neither cdev nor the chip can provide
+ * the current setting, we read from the regmap cache
+ */
+
+ ret = regmap_read(is31->regmap, IS31FL3190_PWM(i), &pwm_value);
+ on = ret >= 0 && pwm_value > LED_OFF;
+
+ ctrl |= on << i;
+ }
+
+ if (ctrl > 0) {
+ dev_dbg(&is31->client->dev, "power up %02x\n", ctrl);
+ regmap_write(is31->regmap, IS31FL3190_LEDCONTROL, ctrl);
+ /* update PWMs */
+ regmap_write(is31->regmap, IS31FL3190_DATA_UPDATE, 0x00);
+ /* enable chip from shut down and enable all channels */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x20);
+ } else {
+ dev_dbg(&is31->client->dev, "power down\n");
+ /* shut down (no need to clear LEDCONTROL) */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
+ }
+
+out:
+ mutex_unlock(&is31->lock);
+
+ return ret;
+}
+
+static int is31fl3196_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
- struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led,
- cdev);
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
struct is31fl319x_chip *is31 = led->chip;
int chan = led - is31->leds;
int ret;
int i;
u8 ctrl1 = 0, ctrl2 = 0;
- dev_dbg(&is31->client->dev, "%s %d: %d\n", __func__, chan, brightness);
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
mutex_lock(&is31->lock);
/* update PWM register */
- ret = regmap_write(is31->regmap, IS31FL319X_PWM(chan), brightness);
+ ret = regmap_write(is31->regmap, IS31FL3196_PWM(chan), brightness);
if (ret < 0)
goto out;
@@ -137,9 +271,7 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
* the current setting, we read from the regmap cache
*/
- ret = regmap_read(is31->regmap, IS31FL319X_PWM(i), &pwm_value);
- dev_dbg(&is31->client->dev, "%s read %d: ret=%d: %d\n",
- __func__, i, ret, pwm_value);
+ ret = regmap_read(is31->regmap, IS31FL3196_PWM(i), &pwm_value);
on = ret >= 0 && pwm_value > LED_OFF;
if (i < 3)
@@ -153,10 +285,10 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
if (ctrl1 > 0 || ctrl2 > 0) {
dev_dbg(&is31->client->dev, "power up %02x %02x\n",
ctrl1, ctrl2);
- regmap_write(is31->regmap, IS31FL319X_CTRL1, ctrl1);
- regmap_write(is31->regmap, IS31FL319X_CTRL2, ctrl2);
+ regmap_write(is31->regmap, IS31FL3196_CTRL1, ctrl1);
+ regmap_write(is31->regmap, IS31FL3196_CTRL2, ctrl2);
/* update PWMs */
- regmap_write(is31->regmap, IS31FL319X_DATA_UPDATE, 0x00);
+ regmap_write(is31->regmap, IS31FL3196_DATA_UPDATE, 0x00);
/* enable chip from shut down */
ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
} else {
@@ -171,92 +303,141 @@ out:
return ret;
}
-static int is31fl319x_parse_child_dt(const struct device *dev,
- const struct device_node *child,
- struct is31fl319x_led *led)
+static const struct is31fl319x_chipdef is31fl3190_cdef = {
+ .num_leds = 1,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3193_cdef = {
+ .num_leds = 3,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3196_cdef = {
+ .num_leds = 6,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct is31fl319x_chipdef is31fl3199_cdef = {
+ .num_leds = 9,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct of_device_id of_is31fl319x_match[] = {
+ { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
+ { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
+ { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
+ { .compatible = "si-en,sn3190", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3191", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3193", .data = &is31fl3193_cdef, },
+ { .compatible = "si-en,sn3196", .data = &is31fl3196_cdef, },
+ { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
+
+static int is31fl319x_parse_child_fw(const struct device *dev,
+ const struct fwnode_handle *child,
+ struct is31fl319x_led *led,
+ struct is31fl319x_chip *is31)
{
struct led_classdev *cdev = &led->cdev;
int ret;
- if (of_property_read_string(child, "label", &cdev->name))
- cdev->name = child->name;
+ if (fwnode_property_read_string(child, "label", &cdev->name))
+ cdev->name = fwnode_get_name(child);
- ret = of_property_read_string(child, "linux,default-trigger",
- &cdev->default_trigger);
+ ret = fwnode_property_read_string(child, "linux,default-trigger", &cdev->default_trigger);
if (ret < 0 && ret != -EINVAL) /* is optional */
return ret;
- led->max_microamp = IS31FL319X_CURRENT_DEFAULT;
- ret = of_property_read_u32(child, "led-max-microamp",
- &led->max_microamp);
+ led->max_microamp = is31->cdef->current_default;
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &led->max_microamp);
if (!ret) {
- if (led->max_microamp < IS31FL319X_CURRENT_MIN)
+ if (led->max_microamp < is31->cdef->current_min)
return -EINVAL; /* not supported */
led->max_microamp = min(led->max_microamp,
- IS31FL319X_CURRENT_MAX);
+ is31->cdef->current_max);
}
return 0;
}
-static int is31fl319x_parse_dt(struct device *dev,
- struct is31fl319x_chip *is31)
+static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
{
- struct device_node *np = dev_of_node(dev), *child;
+ struct fwnode_handle *fwnode = dev_fwnode(dev), *child;
int count;
int ret;
- if (!np)
- return -ENODEV;
-
- is31->shutdown_gpio = devm_gpiod_get_optional(dev,
- "shutdown",
- GPIOD_OUT_HIGH);
- if (IS_ERR(is31->shutdown_gpio)) {
- ret = PTR_ERR(is31->shutdown_gpio);
- dev_err(dev, "Failed to get shutdown gpio: %d\n", ret);
- return ret;
- }
+ is31->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(is31->shutdown_gpio))
+ return dev_err_probe(dev, PTR_ERR(is31->shutdown_gpio),
+ "Failed to get shutdown gpio\n");
is31->cdef = device_get_match_data(dev);
- count = of_get_available_child_count(np);
+ count = 0;
+ fwnode_for_each_available_child_node(fwnode, child)
+ count++;
dev_dbg(dev, "probing with %d leds defined in DT\n", count);
- if (!count || count > is31->cdef->num_leds) {
- dev_err(dev, "Number of leds defined must be between 1 and %u\n",
- is31->cdef->num_leds);
- return -ENODEV;
- }
+ if (!count || count > is31->cdef->num_leds)
+ return dev_err_probe(dev, -ENODEV,
+ "Number of leds defined must be between 1 and %u\n",
+ is31->cdef->num_leds);
- for_each_available_child_of_node(np, child) {
+ fwnode_for_each_available_child_node(fwnode, child) {
struct is31fl319x_led *led;
u32 reg;
- ret = of_property_read_u32(child, "reg", &reg);
+ ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret) {
- dev_err(dev, "Failed to read led 'reg' property\n");
+ ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
goto put_child_node;
}
if (reg < 1 || reg > is31->cdef->num_leds) {
- dev_err(dev, "invalid led reg %u\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
goto put_child_node;
}
led = &is31->leds[reg - 1];
if (led->configured) {
- dev_err(dev, "led %u is already configured\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
goto put_child_node;
}
- ret = is31fl319x_parse_child_dt(dev, child, led);
+ ret = is31fl319x_parse_child_fw(dev, child, led, is31);
if (ret) {
- dev_err(dev, "led %u DT parsing failed\n", reg);
+ ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
goto put_child_node;
}
@@ -264,82 +445,62 @@ static int is31fl319x_parse_dt(struct device *dev,
}
is31->audio_gain_db = 0;
- ret = of_property_read_u32(np, "audio-gain-db", &is31->audio_gain_db);
- if (!ret)
- is31->audio_gain_db = min(is31->audio_gain_db,
- IS31FL319X_AUDIO_GAIN_DB_MAX);
+ if (is31->cdef->is_3196or3199) {
+ ret = fwnode_property_read_u32(fwnode, "audio-gain-db", &is31->audio_gain_db);
+ if (!ret)
+ is31->audio_gain_db = min(is31->audio_gain_db,
+ IS31FL3196_AUDIO_GAIN_DB_MAX);
+ }
return 0;
put_child_node:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
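
As an aside, the hand-rolled counting loop in is31fl319x_parse_fw() has a one-line equivalent in the fwnode property API; a minimal sketch, assuming linux/property.h is in scope (which the fwnode_property_*() calls already imply):

	/* counts available child nodes, same as the loop above */
	count = device_get_child_node_count(dev);
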
-static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
-{ /* we have no readable registers */
- return false;
-}
-
-static bool is31fl319x_volatile_reg(struct device *dev, unsigned int reg)
-{ /* volatile registers are not cached */
- switch (reg) {
- case IS31FL319X_DATA_UPDATE:
- case IS31FL319X_TIME_UPDATE:
- case IS31FL319X_RESET:
- return true; /* always write-through */
+static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ switch (microamp) {
+ case 5000:
+ return IS31FL3190_CURRENT_5_mA;
+ case 10000:
+ return IS31FL3190_CURRENT_10_mA;
+ case 17500:
+ return IS31FL3190_CURRENT_17dot5_mA;
+ case 30000:
+ return IS31FL3190_CURRENT_30_mA;
+ case 42000:
+ return IS31FL3190_CURRENT_42_mA;
default:
- return false;
+ dev_warn(dev, "Unsupported current value: %d, using 5000 µA!\n", microamp);
+ return IS31FL3190_CURRENT_5_mA;
}
}
-static const struct reg_default is31fl319x_reg_defaults[] = {
- { IS31FL319X_CONFIG1, 0x00},
- { IS31FL319X_CONFIG2, 0x00},
- { IS31FL319X_PWM(0), 0x00},
- { IS31FL319X_PWM(1), 0x00},
- { IS31FL319X_PWM(2), 0x00},
- { IS31FL319X_PWM(3), 0x00},
- { IS31FL319X_PWM(4), 0x00},
- { IS31FL319X_PWM(5), 0x00},
- { IS31FL319X_PWM(6), 0x00},
- { IS31FL319X_PWM(7), 0x00},
- { IS31FL319X_PWM(8), 0x00},
-};
-
-static struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = IS31FL319X_REG_CNT,
- .cache_type = REGCACHE_FLAT,
- .readable_reg = is31fl319x_readable_reg,
- .volatile_reg = is31fl319x_volatile_reg,
- .reg_defaults = is31fl319x_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(is31fl319x_reg_defaults),
-};
-
-static inline int is31fl319x_microamp_to_cs(struct device *dev, u32 microamp)
-{ /* round down to nearest supported value (range check done by caller) */
- u32 step = microamp / IS31FL319X_CURRENT_STEP;
+static inline int is31fl3196_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ u32 step = microamp / IS31FL3196_CURRENT_uA_STEP;
- return ((IS31FL319X_CONFIG2_CS_STEP_REF - step) &
- IS31FL319X_CONFIG2_CS_MASK) <<
- IS31FL319X_CONFIG2_CS_SHIFT; /* CS encoding */
+ return ((IS31FL3196_CONFIG2_CS_STEP_REF - step) &
+ IS31FL3196_CONFIG2_CS_MASK) <<
+ IS31FL3196_CONFIG2_CS_SHIFT; /* CS encoding */
}
-static inline int is31fl319x_db_to_gain(u32 dezibel)
-{ /* round down to nearest supported value (range check done by caller) */
- return dezibel / IS31FL319X_AUDIO_GAIN_DB_STEP;
+static inline int is31fl3196_db_to_gain(u32 dezibel)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
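
The CS encoding above is compact enough to sanity-check in isolation. A standalone sketch with the constants copied from the defines; the main() harness is ours, not the driver's:

	#include <stdio.h>
	#include <stdint.h>

	#define CS_SHIFT    4
	#define CS_MASK     0x7
	#define CS_STEP_REF 12
	#define CS_STEP_uA  5000

	static unsigned int microamp_to_cs(uint32_t microamp)
	{
		uint32_t step = microamp / CS_STEP_uA;	/* rounds down */

		return ((CS_STEP_REF - step) & CS_MASK) << CS_SHIFT;
	}

	int main(void)
	{
		/* the 20000 uA default encodes as 0x00, the 5000 uA minimum as 0x30 */
		printf("20000 uA -> 0x%02x\n", microamp_to_cs(20000));
		printf(" 5000 uA -> 0x%02x\n", microamp_to_cs(5000));
		return 0;
	}
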
-static int is31fl319x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
struct device *dev = &client->dev;
int err;
int i = 0;
- u32 aggregated_led_microamp = IS31FL319X_CURRENT_MAX;
+ u32 aggregated_led_microamp;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -EIO;
@@ -349,10 +510,13 @@ static int is31fl319x_probe(struct i2c_client *client,
return -ENOMEM;
mutex_init(&is31->lock);
+ err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
+ if (err)
+ return err;
- err = is31fl319x_parse_dt(&client->dev, is31);
+ err = is31fl319x_parse_fw(&client->dev, is31);
if (err)
- goto free_mutex;
+ return err;
if (is31->shutdown_gpio) {
gpiod_direction_output(is31->shutdown_gpio, 0);
@@ -361,37 +525,35 @@ static int is31fl319x_probe(struct i2c_client *client,
}
is31->client = client;
- is31->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(is31->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- err = PTR_ERR(is31->regmap);
- goto free_mutex;
- }
+ is31->regmap = devm_regmap_init_i2c(client, is31->cdef->is31fl319x_regmap_config);
+ if (IS_ERR(is31->regmap))
+ return dev_err_probe(dev, PTR_ERR(is31->regmap), "failed to allocate register map\n");
i2c_set_clientdata(client, is31);
/* check for write-reply from chip (we can't read any registers) */
- err = regmap_write(is31->regmap, IS31FL319X_RESET, 0x00);
- if (err < 0) {
- dev_err(&client->dev, "no response from chip write: err = %d\n",
- err);
- err = -EIO; /* does not answer */
- goto free_mutex;
- }
+ err = regmap_write(is31->regmap, is31->cdef->reset_reg, 0x00);
+ if (err < 0)
+ return dev_err_probe(dev, err, "no response from chip write\n");
/*
* Kernel conventions require per-LED led-max-microamp property.
* But the chip does not allow limiting individual LEDs.
* So we take the minimum across all subnodes, for hardware safety.
*/
+ aggregated_led_microamp = is31->cdef->current_max;
for (i = 0; i < is31->cdef->num_leds; i++)
if (is31->leds[i].configured &&
is31->leds[i].max_microamp < aggregated_led_microamp)
aggregated_led_microamp = is31->leds[i].max_microamp;
- regmap_write(is31->regmap, IS31FL319X_CONFIG2,
- is31fl319x_microamp_to_cs(dev, aggregated_led_microamp) |
- is31fl319x_db_to_gain(is31->audio_gain_db));
+ if (is31->cdef->is_3196or3199)
+ regmap_write(is31->regmap, IS31FL3196_CONFIG2,
+ is31fl3196_microamp_to_cs(dev, aggregated_led_microamp) |
+ is31fl3196_db_to_gain(is31->audio_gain_db));
+ else
+ regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK,
+ is31fl3190_microamp_to_cs(dev, aggregated_led_microamp));
for (i = 0; i < is31->cdef->num_leds; i++) {
struct is31fl319x_led *led = &is31->leds[i];
@@ -400,26 +562,14 @@ static int is31fl319x_probe(struct i2c_client *client,
continue;
led->chip = is31;
- led->cdev.brightness_set_blocking = is31fl319x_brightness_set;
+ led->cdev.brightness_set_blocking = is31->cdef->brightness_set;
err = devm_led_classdev_register(&client->dev, &led->cdev);
if (err < 0)
- goto free_mutex;
+ return err;
}
return 0;
-
-free_mutex:
- mutex_destroy(&is31->lock);
- return err;
-}
-
-static int is31fl319x_remove(struct i2c_client *client)
-{
- struct is31fl319x_chip *is31 = i2c_get_clientdata(client);
-
- mutex_destroy(&is31->lock);
- return 0;
}
/*
@@ -432,6 +582,10 @@ static const struct i2c_device_id is31fl319x_id[] = {
{ "is31fl3193" },
{ "is31fl3196" },
{ "is31fl3199" },
+ { "sn3190" },
+ { "sn3191" },
+ { "sn3193" },
+ { "sn3196" },
{ "sn3199" },
{},
};
@@ -440,10 +594,9 @@ MODULE_DEVICE_TABLE(i2c, is31fl319x_id);
static struct i2c_driver is31fl319x_driver = {
.driver = {
.name = "leds-is31fl319x",
- .of_match_table = of_match_ptr(of_is31fl319x_match),
+ .of_match_table = of_is31fl319x_match,
},
- .probe = is31fl319x_probe,
- .remove = is31fl319x_remove,
+ .probe_new = is31fl319x_probe,
.id_table = is31fl319x_id,
};
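
One remark on the devm_add_action() call in probe: casting mutex_destroy to void (*)(void *) works, but it defeats type checking and trips control-flow-integrity checks on some configs. A cast-free variant, as a sketch (the wrapper name is ours, not from the driver):

	static void is31fl319x_mutex_destroy(void *lock)
	{
		mutex_destroy(lock);
	}

	/* in probe():
	 *	mutex_init(&is31->lock);
	 *	err = devm_add_action_or_reset(dev, is31fl319x_mutex_destroy,
	 *				       &is31->lock);
	 *	if (err)
	 *		return err;
	 */
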
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 1adfed1c0619..eac6f4a573b2 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -239,9 +239,6 @@ static int omnia_leds_probe(struct i2c_client *client,
led += ret;
}
- if (devm_device_add_groups(dev, omnia_led_controller_groups))
- dev_warn(dev, "Could not add attribute group!\n");
-
return 0;
}
@@ -283,6 +280,7 @@ static struct i2c_driver omnia_leds_driver = {
.driver = {
.name = "leds-turris-omnia",
.of_match_table = of_omnia_leds_match,
+ .dev_groups = omnia_led_controller_groups,
},
};
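
The .dev_groups change works because the driver core creates the listed sysfs groups after a successful probe() and removes them before remove(), so the driver no longer has to add them itself (and can no longer fail to). A self-contained sketch with an illustrative attribute, not one from this driver:

	static ssize_t fw_version_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "1.0\n");
	}
	static DEVICE_ATTR_RO(fw_version);

	static struct attribute *demo_attrs[] = {
		&dev_attr_fw_version.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(demo);	/* provides demo_groups for .dev_groups */
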
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 45e38708ecb1..da9d2218ae18 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -19,6 +19,7 @@
struct pwm_led {
struct pwm_device *pwm;
struct pwm_state state;
+ bool active_low;
};
struct pwm_mc_led {
@@ -45,6 +46,9 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
duty *= mc_cdev->subled_info[i].brightness;
do_div(duty, cdev->max_brightness);
+ if (priv->leds[i].active_low)
+ duty = priv->leds[i].state.period - duty;
+
priv->leds[i].state.duty_cycle = duty;
priv->leds[i].state.enabled = duty > 0;
ret = pwm_apply_state(priv->leds[i].pwm,
@@ -72,11 +76,11 @@ static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
pwmled = &priv->leds[priv->mc_cdev.num_colors];
pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
if (IS_ERR(pwmled->pwm)) {
- ret = PTR_ERR(pwmled->pwm);
- dev_err(dev, "unable to request PWM: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(pwmled->pwm), "unable to request PWM\n");
goto release_fwnode;
}
pwm_init_state(pwmled->pwm, &pwmled->state);
+ pwmled->active_low = fwnode_property_read_bool(fwnode, "active-low");
ret = fwnode_property_read_u32(fwnode, "color", &color);
if (ret) {
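
The active-low handling inverts the on-time within the period: for brightness b out of max m, an active-high pin drives duty = period * b / m, while an active-low pin drives period - duty. A standalone check with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t period = 1000000;		/* ns */
		uint64_t duty = period * 128 / 255;	/* brightness 128/255 */

		printf("active-high duty: %llu ns\n", (unsigned long long)duty);
		printf("active-low  duty: %llu ns\n",
		       (unsigned long long)(period - duty));
		return 0;
	}
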
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 439fab4eaa85..1bbb9ca08d40 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
break;
mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
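
The new req->data[2] bound stops a user-controlled index from running past the handler table; the fix implies the table has 16 entries. With a named constant (ADB_MAX_HANDLERS is our label, not from the file) the check would read:

	#define ADB_MAX_HANDLERS 16	/* table size implied by the bound above */

	if (req->nbytes < 3 || req->data[2] >= ADB_MAX_HANDLERS)
		break;
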
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index b10239d6ef93..02922073c9ef 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -19,13 +19,15 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#define IMX_MU_CHANS 16
+#define IMX_MU_CHANS 17
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_NUM_RR 4
+
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
@@ -35,9 +37,11 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RX = 1, /* Rx */
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
+ IMX_MU_TYPE_RST = 4, /* Reset */
};
enum imx_mu_xcr {
+ IMX_MU_CR,
IMX_MU_GIER,
IMX_MU_GCR,
IMX_MU_TCR,
@@ -50,6 +54,7 @@ enum imx_mu_xsr {
IMX_MU_GSR,
IMX_MU_TSR,
IMX_MU_RSR,
+ IMX_MU_xSR_MAX,
};
struct imx_sc_rpc_msg_max {
@@ -85,7 +90,7 @@ struct imx_mu_priv {
int irq[IMX_MU_CHANS];
bool suspend;
- u32 xcr[4];
+ u32 xcr[IMX_MU_xCR_MAX];
bool side_b;
};
@@ -105,8 +110,8 @@ struct imx_mu_dcfg {
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
- u32 xSR[4]; /* Status Registers */
- u32 xCR[4]; /* Control Registers */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
@@ -121,6 +126,9 @@ struct imx_mu_dcfg {
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+/* MU reset */
+#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
+#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -497,6 +505,8 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ return IRQ_NONE;
default:
dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
cp->type);
@@ -581,6 +591,8 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+ u32 sr;
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
@@ -598,6 +610,13 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
+ !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
+ if (ret)
+ dev_warn(priv->dev, "RST channel timeout\n");
+ break;
default:
break;
}
@@ -694,6 +713,7 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
+ unsigned int val;
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
@@ -715,6 +735,14 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ /* Clear any pending GIP */
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
+
+ /* Clear any pending RSR */
+ for (i = 0; i < IMX_MU_NUM_RR; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
static void imx_mu_init_specific(struct imx_mu_priv *priv)
@@ -865,7 +893,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
@@ -888,7 +916,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
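
The RST teardown added to imx_mu_shutdown() follows the usual request-then-poll shape: set the self-clearing reset bit in the control register, then spin on the status register until the hardware drops its RST flag. A condensed sketch; readl_poll_timeout() stores each read in its val argument and returns -ETIMEDOUT if the condition never holds within timeout_us:

	#include <linux/iopoll.h>

	static int mu_assert_reset(void __iomem *cr, void __iomem *sr, u32 rst)
	{
		u32 val;

		writel(readl(cr) | rst, cr);	/* request the reset */
		return readl_poll_timeout(sr, val, !(val & rst), 1, 5);
	}
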
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 2578e5aaa935..9465f9081515 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -192,15 +192,10 @@ static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
- struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
data.sta = sta;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
@@ -448,7 +443,6 @@ done:
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
- struct cmdq_task_cb *cb;
struct cmdq_cb_data data;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
@@ -465,13 +459,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
- cb = &task->pkt->async_cb;
data.sta = -ECONNABORTED;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
kfree(task);
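
With the per-packet async_cb gone, completion status travels solely through the mailbox framework: mbox_chan_received_data() invokes the client's rx_callback. A consumer-side sketch, names illustrative:

	static void my_cmdq_rx(struct mbox_client *cl, void *msg)
	{
		struct cmdq_cb_data *data = msg;

		if (data->sta)
			dev_warn(cl->dev, "pkt %p failed: %d\n",
				 data->pkt, data->sta);
	}

	/* set cl->rx_callback = my_cmdq_rx; before mbox_request_channel() */
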
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e136d6edc1ed..147c493a989a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -812,7 +812,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
- if (register_shrinker(&c->shrink))
+ if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
pr_warn("bcache: %s: could not register shrinker\n",
__func__);
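
This hunk (and the matching ones in dm-bufio, dm-zoned and raid5 below) adapts to the shrinker-naming API: register_shrinker() now takes a printf-style name, surfaced in shrinker debugfs and tracepoints. Shape of the call, with an illustrative name:

	ret = register_shrinker(&sh, "my-subsys:%s", instance_name);
	if (ret)
		pr_warn("shrinker registration failed\n");
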
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index dc01ce33265b..09c7ed2650ca 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "bufio"
@@ -81,6 +82,8 @@
*/
struct dm_bufio_client {
struct mutex lock;
+ spinlock_t spinlock;
+ bool no_sleep;
struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE];
@@ -90,7 +93,6 @@ struct dm_bufio_client {
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
-
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
@@ -161,23 +163,34 @@ struct dm_buffer {
#endif
};
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
static void dm_bufio_lock(struct dm_bufio_client *c)
{
- mutex_lock_nested(&c->lock, dm_bufio_in_request());
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_lock_bh(&c->spinlock);
+ else
+ mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
static int dm_bufio_trylock(struct dm_bufio_client *c)
{
- return mutex_trylock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return spin_trylock_bh(&c->spinlock);
+ else
+ return mutex_trylock(&c->lock);
}
static void dm_bufio_unlock(struct dm_bufio_client *c)
{
- mutex_unlock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_unlock_bh(&c->spinlock);
+ else
+ mutex_unlock(&c->lock);
}
/*----------------------------------------------------------------*/
@@ -802,6 +815,10 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_WRITING, &b->state));
BUG_ON(test_bit(B_DIRTY, &b->state));
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+ unlikely(test_bit(B_READING, &b->state)))
+ continue;
+
if (!b->hold_count) {
__make_buffer_clean(b);
__unlink_buffer(b);
@@ -810,6 +827,9 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
cond_resched();
}
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return NULL;
+
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
BUG_ON(test_bit(B_READING, &b->state));
@@ -1617,7 +1637,8 @@ static void drop_buffers(struct dm_bufio_client *c)
*/
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
- if (!(gfp & __GFP_FS)) {
+ if (!(gfp & __GFP_FS) ||
+ (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
@@ -1715,7 +1736,8 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *))
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags)
{
int r;
struct dm_bufio_client *c;
@@ -1745,12 +1767,18 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->alloc_callback = alloc_callback;
c->write_callback = write_callback;
+ if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
+ c->no_sleep = true;
+ static_branch_inc(&no_sleep_enabled);
+ }
+
for (i = 0; i < LIST_SIZE; i++) {
INIT_LIST_HEAD(&c->lru[i]);
c->n_buffers[i] = 0;
}
mutex_init(&c->lock);
+ spin_lock_init(&c->spinlock);
INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers;
@@ -1804,7 +1832,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker);
+ r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
@@ -1876,6 +1905,8 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
kmem_cache_destroy(c->slab_buffer);
dm_io_client_destroy(c->dm_io);
mutex_destroy(&c->lock);
+ if (c->no_sleep)
+ static_branch_dec(&no_sleep_enabled);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
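
Distilled, the dm-bufio locking change is a static-key-gated lock choice: the key is only incremented when a DM_BUFIO_CLIENT_NO_SLEEP client is created (and decremented on destroy), so the common sleeping path keeps an effectively free branch. A minimal sketch with illustrative names:

	#include <linux/jump_label.h>
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	static DEFINE_STATIC_KEY_FALSE(nosleep_enabled);

	struct client {
		struct mutex lock;
		spinlock_t slock;
		bool no_sleep;
	};

	static void client_lock(struct client *c)
	{
		/* branch is patched out until a no_sleep client exists */
		if (static_branch_unlikely(&nosleep_enabled) && c->no_sleep)
			spin_lock_bh(&c->slock);
		else
			mutex_lock(&c->lock);
	}
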
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 223e8e1a7a13..512cc6cea095 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -313,7 +313,8 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1,
+ 0, NULL, NULL, 0);
if (IS_ERR(ec->bufio)) {
ti->error = "Cannot create dm bufio client";
r = PTR_ERR(ec->bufio);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c60f9b2ece2d..aaf2472df6e5 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4439,7 +4439,7 @@ try_smaller_buffer:
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
- 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
if (IS_ERR(ic->bufio)) {
r = PTR_ERR(ic->bufio);
ti->error = "Cannot initialize dm-bufio";
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index f46f930eedf9..680cc05ec654 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -493,7 +493,7 @@ static int read_exceptions(struct pstore *ps,
client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
ps->store->chunk_size << SECTOR_SHIFT,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(client))
return PTR_ERR(client);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index cea2b3789736..23cffce56403 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -749,7 +749,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
@@ -765,7 +765,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
1 << v->data_dev_block_bits,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->data_bufio)) {
ti->error = "Cannot initialize FEC data bufio client";
return PTR_ERR(f->data_bufio);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4fd853a56b1a..94b6cb599db4 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "verity"
@@ -35,14 +36,17 @@
#define DM_VERITY_OPT_PANIC "panic_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
+#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
-#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
+
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -221,7 +225,7 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
struct mapped_device *md = dm_table_get_md(v->ti->table);
/* Corruption should be visible in device status in all modes */
- v->hash_failed = 1;
+ v->hash_failed = true;
if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
goto out;
@@ -287,7 +291,19 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
verity_hash_at_level(v, block, level, &hash_block, &offset);
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ data = dm_bufio_get(v->bufio, hash_block, &buf);
+ if (data == NULL) {
+ /*
+ * We are in a tasklet and the hash was not in the bufio cache.
+ * Return early and resume execution from a work-queue
+ * to read the hash from disk.
+ */
+ return -EAGAIN;
+ }
+ } else
+ data = dm_bufio_read(v->bufio, hash_block, &buf);
+
if (IS_ERR(data))
return PTR_ERR(data);
@@ -308,6 +324,15 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
+ else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fall back to the work-queue.
+ */
+ r = -EAGAIN;
+ goto release_ret_r;
+ }
else if (verity_fec_decode(v, io,
DM_VERITY_BLOCK_TYPE_METADATA,
hash_block, data, NULL) == 0)
@@ -474,10 +499,24 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
+#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
- unsigned b;
+#endif
+ struct bvec_iter iter_copy;
+ struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ unsigned int b;
+
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ /*
+ * Copy the iterator in case we need to restart
+ * verification in a work-queue.
+ */
+ iter_copy = io->iter;
+ iter = &iter_copy;
+ } else
+ iter = &io->iter;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -486,7 +525,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks &&
likely(test_bit(cur_block, v->validated_blocks))) {
- verity_bv_skip_block(v, io, &io->iter);
+ verity_bv_skip_block(v, io, iter);
continue;
}
@@ -501,7 +540,7 @@ static int verity_verify_io(struct dm_verity_io *io)
* If we expect a zero block, don't validate, just
* return zeros.
*/
- r = verity_for_bv_block(v, io, &io->iter,
+ r = verity_for_bv_block(v, io, iter,
verity_bv_zero);
if (unlikely(r < 0))
return r;
@@ -513,8 +552,11 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
- start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &wait);
+#if defined(CONFIG_DM_VERITY_FEC)
+ if (verity_fec_is_enabled(v))
+ start = *iter;
+#endif
+ r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -528,9 +570,18 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
+ } else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fall back to the work-queue.
+ */
+ return -EAGAIN;
+#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block, NULL, &start) == 0) {
+ cur_block, NULL, &start) == 0) {
continue;
+#endif
} else {
if (bio->bi_status) {
/*
@@ -567,7 +618,8 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
- verity_fec_finish_io(io);
+ if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ verity_fec_finish_io(io);
bio_endio(bio);
}
@@ -576,9 +628,29 @@ static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+ io->in_tasklet = false;
+
+ verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
+static void verity_tasklet(unsigned long data)
+{
+ struct dm_verity_io *io = (struct dm_verity_io *)data;
+ int err;
+
+ io->in_tasklet = true;
+ err = verity_verify_io(io);
+ if (err == -EAGAIN) {
+ /* fallback to retrying with work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ return;
+ }
+
+ verity_finish_io(io, errno_to_blk_status(err));
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -589,8 +661,13 @@ static void verity_end_io(struct bio *bio)
return;
}
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
+ tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
+ tasklet_schedule(&io->tasklet);
+ } else {
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ }
}
/*
@@ -701,8 +778,6 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
- verity_fec_init_io(io);
-
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
@@ -752,6 +827,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
args++;
if (v->validated_blocks)
args++;
+ if (v->use_tasklet)
+ args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
if (!args)
@@ -777,6 +854,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
+ if (v->use_tasklet)
+ DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
@@ -890,6 +969,9 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->signature_key_desc);
+ if (v->use_tasklet)
+ static_branch_dec(&use_tasklet_enabled);
+
kfree(v);
}
@@ -968,9 +1050,10 @@ static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
}
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
- struct dm_verity_sig_opts *verify_args)
+ struct dm_verity_sig_opts *verify_args,
+ bool only_modifier_opts)
{
- int r;
+ int r = 0;
unsigned argc;
struct dm_target *ti = v->ti;
const char *arg_name;
@@ -991,6 +1074,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
argc--;
if (verity_is_verity_mode(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_parse_verity_mode(v, arg_name);
if (r) {
ti->error = "Conflicting error handling parameters";
@@ -999,6 +1084,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_zero_digest(v);
if (r) {
ti->error = "Cannot allocate zero digest";
@@ -1007,17 +1094,29 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_most_once(v);
if (r)
return r;
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
+ v->use_tasklet = true;
+ static_branch_inc(&use_tasklet_enabled);
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
return r;
continue;
+
} else if (verity_verify_is_sig_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_verify_sig_parse_opt_args(as, v,
verify_args,
&argc, arg_name);
@@ -1025,8 +1124,17 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
return r;
continue;
+ } else if (only_modifier_opts) {
+ /*
+ * Ignore unrecognized opt, could easily be an extra
+ * argument to an option whose parsing was skipped.
+ * Normal parsing (@only_modifier_opts=false) will
+ * properly parse all options (and their extra args).
+ */
+ continue;
}
+ DMERR("Unrecognized verity feature request: %s", arg_name);
ti->error = "Unrecognized verity feature request";
return -EINVAL;
} while (argc && !r);
@@ -1054,6 +1162,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
+ unsigned int wq_flags;
unsigned long long num_ll;
int r;
int i;
@@ -1085,6 +1194,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ /* Parse optional parameters that modify primary args */
+ if (argc > 10) {
+ as.argc = argc - 10;
+ as.argv = argv + 10;
+ r = verity_parse_opt_args(&as, v, &verify_args, true);
+ if (r < 0)
+ goto bad;
+ }
+
if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
num > 1) {
ti->error = "Invalid version";
@@ -1156,7 +1274,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->tfm = crypto_alloc_ahash(v->alg_name, 0, 0);
+ v->tfm = crypto_alloc_ahash(v->alg_name, 0,
+ v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
@@ -1218,8 +1337,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (argc) {
as.argc = argc;
as.argv = argv;
-
- r = verity_parse_opt_args(&as, v, &verify_args);
+ r = verity_parse_opt_args(&as, v, &verify_args, false);
if (r < 0)
goto bad;
}
@@ -1266,7 +1384,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
- dm_bufio_alloc_callback, NULL);
+ dm_bufio_alloc_callback, NULL,
+ v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
@@ -1281,7 +1400,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
- v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
+ if (v->use_tasklet) {
+ /*
+ * Allow verify_wq to preempt softirq since verification in
+ * tasklet will fall-back to using it for error handling
+ * (or if the bufio cache doesn't have required hashes).
+ */
+ wq_flags |= WQ_HIGHPRI;
+ }
+ v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
@@ -1343,7 +1471,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 8, 1},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
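
A side note on the crypto_alloc_ahash() change above: the third argument is a mask, and crypto_alloc_*() selects algorithms whose flags match the type argument under that mask. Passing type 0 with mask CRYPTO_ALG_ASYNC therefore restricts the allocation to synchronous implementations, which is what softirq-context verification needs, since a tasklet cannot sleep waiting for an async completion:

	/* sync-only transform, safe to drive from atomic context */
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
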
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index c832cc3e3d24..45455de1b4bc 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -13,6 +13,7 @@
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
+#include <linux/interrupt.h>
#include <crypto/hash.h>
#define DM_VERITY_MAX_LEVELS 63
@@ -51,9 +52,10 @@ struct dm_verity {
unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
unsigned char levels; /* the number of tree levels */
unsigned char version;
+ bool hash_failed:1; /* set if hash of any block failed */
+ bool use_tasklet:1; /* try to verify in tasklet before work-queue */
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
- int hash_failed; /* set to 1 if hash of any block failed */
enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */
@@ -76,10 +78,12 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
+ bool in_tasklet;
struct bvec_iter iter;
struct work_struct work;
+ struct tasklet_struct tasklet;
/*
* Three variably-size fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 1fc161d65673..96a003eb7323 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1594,7 +1594,8 @@ done:
default:
BUG();
- return -1;
+ wc_unlock(wc);
+ return DM_MAPIO_KILL;
}
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 34db364c23a8..0278482fac94 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2945,7 +2945,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker);
+ ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
+ MAJOR(dev->bdev->bd_dev),
+ MINOR(dev->bdev->bd_dev));
if (ret) {
dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 28bd4a35b86b..60549b65c799 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -752,7 +752,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
return 0;
}
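
The two NULLs are the new holder arguments of fs_dax_get_by_bdev(): a holder pointer plus a dax_holder_operations table for memory-failure notification; passing NULL/NULL opts out. A sketch of a caller that does want notifications, names illustrative:

	static int my_notify_failure(struct dax_device *dax_dev, u64 offset,
				     u64 len, int mf_flags)
	{
		/* tell the holder (e.g. a filesystem) which range went bad */
		return 0;
	}

	static const struct dax_holder_operations my_dax_ops = {
		.notify_failure = my_notify_failure,
	};

	dax_dev = fs_dax_get_by_bdev(bdev, &part_off, sb, &my_dax_ops);
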
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 54c089a50b15..11935864f50f 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -391,7 +391,8 @@ struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
sizeof(struct buffer_aux),
dm_block_manager_alloc_callback,
- dm_block_manager_write_callback);
+ dm_block_manager_write_callback,
+ 0);
if (IS_ERR(bm->bufio)) {
r = PTR_ERR(bm->bufio);
kfree(bm);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 860c45c10a57..31a0cbf63384 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7644,7 +7644,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- ret = register_shrinker(&conf->shrinker);
+ ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev));
if (ret) {
pr_warn("md/raid:%s: couldn't register shrinker.\n",
mdname(mddev));
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 0de7acdf58a7..f66ac14cffad 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2517,7 +2517,6 @@ static struct snd_soc_component_driver tda1997x_codec_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int tda1997x_probe(struct i2c_client *client,
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index cb48c5ff3dee..c93d2906e4c7 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -875,7 +875,7 @@ static int vcodec_domains_get(struct venus_core *core)
}
skip_pmdomains:
- if (!core->has_opp_table)
+ if (!core->res->opp_pmdomain)
return 0;
/* Attach the power domain for setting performance state */
@@ -1007,6 +1007,10 @@ static int core_get_v4(struct venus_core *core)
if (ret)
return ret;
+ ret = vcodec_domains_get(core);
+ if (ret)
+ return ret;
+
if (core->res->opp_pmdomain) {
ret = devm_pm_opp_of_add_table(dev);
if (!ret) {
@@ -1017,10 +1021,6 @@ static int core_get_v4(struct venus_core *core)
}
}
- ret = vcodec_domains_get(core);
- if (ret)
- return ret;
-
return 0;
}
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0834d5f866fd..39d2b03e2631 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1416,42 +1416,37 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
struct device *dev = ir->dev;
- char *data;
-
- data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
- if (!data) {
- dev_err(dev, "%s: memory allocation failed!", __func__);
- return;
- }
+ char data[USB_CTRL_MSG_SZ];
/*
* This is a strange one. Windows issues a set address to the device
* on the receive control pipe and expect a certain value pair back
*/
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
- data, USB_CTRL_MSG_SZ, 3000);
+ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, 0, data, USB_CTRL_MSG_SZ, 3000,
+ GFP_KERNEL);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
/* set feature: bit rate 38400 bps */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
- 0xc04e, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
+ 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 4, USB_TYPE_VENDOR,
- 0x0808, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 4, USB_TYPE_VENDOR,
+ 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 2, USB_TYPE_VENDOR,
- 0x0000, 0x0100, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 2, USB_TYPE_VENDOR,
+ 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
@@ -1459,8 +1454,6 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
/* get hw/sw revision? */
mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
- kfree(data);
}
static void mceusb_gen2_init(struct mceusb_dev *ir)
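
The kzalloc'd bounce buffer could be dropped because usb_control_msg_recv()/usb_control_msg_send() copy through an internal heap allocation, so on-stack data is allowed (raw usb_control_msg() forbids it), and recv() additionally converts short transfers into -EREMOTEIO, so a 0 return guarantees a completely filled buffer. Typical shape:

	u8 buf[2];
	int ret;

	ret = usb_control_msg_recv(udev, 0, req,
				   USB_DIR_IN | USB_TYPE_VENDOR,
				   0, 0, buf, sizeof(buf), 3000, GFP_KERNEL);
	if (ret)
		return ret;	/* error or short read */
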
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 7835bb0f32fc..e012b21c4fd7 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 908f8d5392b2..85bc936c02f9 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1395,15 +1395,14 @@ err_msg:
static int tegra_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
- struct opp_table *hw_opp_table;
- int err;
+ int opp_token, err;
- hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- err = PTR_ERR_OR_ZERO(hw_opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ if (err < 0) {
dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
return err;
}
+ opp_token = err;
err = dev_pm_opp_of_add_table(emc->dev);
if (err) {
@@ -1430,7 +1429,7 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
remove_table:
dev_pm_opp_of_remove_table(emc->dev);
put_hw_table:
- dev_pm_opp_put_supported_hw(hw_opp_table);
+ dev_pm_opp_put_supported_hw(opp_token);
return err;
}
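
This is the token-based OPP configuration API: dev_pm_opp_set_supported_hw() now returns a positive token (or a negative errno) instead of an opp_table pointer, and the put side takes that token back. Shape of the pairing:

	int token;

	token = dev_pm_opp_set_supported_hw(dev, &hw_version, 1);
	if (token < 0)
		return token;
	/* ... use the OPP table ... */
	dev_pm_opp_put_supported_hw(token);
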
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 9566341de470..abb58ab1a1a4 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1358,12 +1358,13 @@ config MFD_STA2X11
select REGMAP_MMIO
config MFD_SUN6I_PRCM
- bool "Allwinner A31 PRCM controller"
+ bool "Allwinner A31/A23/A33 PRCM controller"
depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
- in A31 SoC.
+ in the A31, A23, and A33 SoCs. Other Allwinner SoCs contain similar
+ hardware, but they do not use this driver.
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 56338f9dbd0b..4fb7e35eb5ed 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -596,12 +596,11 @@ static __init int asic3_gpio_probe(struct platform_device *pdev,
return gpiochip_add_data(&asic->gpio, asic);
}
-static int asic3_gpio_remove(struct platform_device *pdev)
+static void asic3_gpio_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
gpiochip_remove(&asic->gpio);
- return 0;
}
static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
@@ -1030,7 +1029,6 @@ static int __init asic3_probe(struct platform_device *pdev)
static int asic3_remove(struct platform_device *pdev)
{
- int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
@@ -1038,9 +1036,8 @@ static int asic3_remove(struct platform_device *pdev)
asic3_mfd_remove(pdev);
- ret = asic3_gpio_remove(pdev);
- if (ret < 0)
- return ret;
+ asic3_gpio_remove(pdev);
+
asic3_irq_remove(pdev);
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 8161a5dc68e8..88a212a8168c 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -619,6 +619,9 @@ static const struct mfd_cell axp20x_cells[] = {
static const struct mfd_cell axp221_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -645,6 +648,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -785,6 +791,9 @@ static const struct mfd_cell axp806_cells[] = {
static const struct mfd_cell axp809_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 02d4271dfe06..344ad03bdc42 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -65,6 +65,11 @@ static const struct cros_feature_to_name cros_mcu_devices[] = {
.desc = "System Control Processor",
},
{
+ .id = EC_FEATURE_SCP_C1,
+ .name = CROS_EC_DEV_SCP_C1_NAME,
+ .desc = "System Control Processor 2nd Core",
+ },
+ {
.id = EC_FEATURE_TOUCHPAD,
.name = CROS_EC_DEV_TP_NAME,
.desc = "Touchpad",
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 56c61c99eb23..27a881da4d6e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -798,7 +798,7 @@ void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
* @opp: The new ARM operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
- * This function sets the the operating point of the ARM.
+ * This function sets the operating point of the ARM.
*/
int db8500_prcmu_set_arm_opp(u8 opp)
{
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 852129ea0766..6cd0b0c752d6 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -91,11 +91,6 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
-enum dln2_endpoint {
- DLN2_EP_OUT = 0,
- DLN2_EP_IN = 1,
-};
-
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -777,16 +772,12 @@ static int dln2_probe(struct usb_interface *interface,
int ret;
int i, j;
- if (hostif->desc.bInterfaceNumber != 0 ||
- hostif->desc.bNumEndpoints < 2)
+ if (hostif->desc.bInterfaceNumber != 0)
return -ENODEV;
- epout = &hostif->endpoint[DLN2_EP_OUT].desc;
- if (!usb_endpoint_is_bulk_out(epout))
- return -ENODEV;
- epin = &hostif->endpoint[DLN2_EP_IN].desc;
- if (!usb_endpoint_is_bulk_in(epin))
- return -ENODEV;
+ ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
+ if (ret)
+ return ret;
dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
if (!dln2)
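
Instead of hard-coding endpoint array indices, the dln2 probe now asks the USB core for the first bulk-in/bulk-out pair. Assuming the interface exposes at least one endpoint of each type, the lookup and its error handling collapse to one call:

#include <linux/usb.h>

/* Sketch: usb_find_common_endpoints() fills in the first matching
 * endpoint descriptors and returns -ENXIO if any requested type is
 * absent, so the explicit usb_endpoint_is_bulk_*() checks go away.
 */
static int example_find_eps(struct usb_host_interface *hostif,
			    struct usb_endpoint_descriptor **epin,
			    struct usb_endpoint_descriptor **epout)
{
	return usb_find_common_endpoints(hostif, epin, epout, NULL, NULL);
}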
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index f7950d2197df..bb08b7a73fe1 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -385,6 +385,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
+ /* MTL-P */
+ { PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e78), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e79), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7a), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7b), (kernel_ulong_t)&bxt_i2c_info },
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index bc069c4daa60..8dac0d41f64f 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -2,10 +2,11 @@
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
- * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015-2017, 2022 Intel Corporation. All rights reserved.
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -18,9 +19,9 @@
#include <asm/intel_scu_ipc.h>
/* PMIC device registers */
-#define REG_ADDR_MASK 0xFF00
+#define REG_ADDR_MASK GENMASK(15, 8)
#define REG_ADDR_SHIFT 8
-#define REG_OFFSET_MASK 0xFF
+#define REG_OFFSET_MASK GENMASK(7, 0)
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
@@ -112,29 +113,29 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, BIT(0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
- REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
- REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, 0xff),
+ REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, GENMASK(7, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
- REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, GENMASK(4, 0)),
+ REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
- REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
+ REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, GENMASK(2, 1)),
};
static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
- REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, 0x03),
+ REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)),
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
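
The mask conversions above trade magic numbers for the self-describing helpers in <linux/bits.h>: BIT(5) is 0x20 and GENMASK(4, 0) is 0x1f. The same macros pair with FIELD_GET() from <linux/bitfield.h> when a field has to be extracted, as this illustrative 16-bit register split shows:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_ADDR_MASK	GENMASK(15, 8)	/* == 0xFF00 */
#define EXAMPLE_OFF_MASK	GENMASK(7, 0)	/* == 0x00FF */

/* Sketch: split a 16-bit register spec into page address and offset.
 * The layout is illustrative, not taken from the PMIC datasheet.
 */
static void example_split_reg(u16 reg, u8 *addr, u8 *off)
{
	*addr = FIELD_GET(EXAMPLE_ADDR_MASK, reg);
	*off = FIELD_GET(EXAMPLE_OFF_MASK, reg);
}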
@@ -333,17 +334,19 @@ static unsigned long bxtwc_reg_addr;
static ssize_t addr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%lx\n", bxtwc_reg_addr);
+ return sysfs_emit(buf, "0x%lx\n", bxtwc_reg_addr);
}
static ssize_t addr_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- if (kstrtoul(buf, 0, &bxtwc_reg_addr)) {
- dev_err(dev, "Invalid register address\n");
- return -EINVAL;
- }
- return (ssize_t)count;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &bxtwc_reg_addr);
+ if (ret)
+ return ret;
+
+ return count;
}
static ssize_t val_show(struct device *dev,
@@ -354,12 +357,12 @@ static ssize_t val_show(struct device *dev,
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
ret = regmap_read(pmic->regmap, bxtwc_reg_addr, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "Failed to read 0x%lx\n", bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
- return sprintf(buf, "0x%02x\n", val);
+ return sysfs_emit(buf, "0x%02x\n", val);
}
static ssize_t val_store(struct device *dev,
@@ -377,7 +380,7 @@ static ssize_t val_store(struct device *dev,
if (ret) {
dev_err(dev, "Failed to write value 0x%02x to address 0x%lx",
val, bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
return count;
}
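
Two idioms recur in the attribute rework above: sysfs_emit() bounds output to PAGE_SIZE where sprintf() would not, and parse errors from kstrtoul() are propagated verbatim instead of being flattened to -EINVAL. A minimal show/store pair under those assumptions, with example_val as a stand-in for the exported state:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static unsigned long example_val;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%lx\n", example_val);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int ret = kstrtoul(buf, 0, &example_val);

	return ret ? ret : count;	/* pass the real errno through */
}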
@@ -394,6 +397,11 @@ static const struct attribute_group bxtwc_group = {
.attrs = bxtwc_attrs,
};
+static const struct attribute_group *bxtwc_groups[] = {
+ &bxtwc_group,
+ NULL
+};
+
static const struct regmap_config bxtwc_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -410,12 +418,9 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
int irq;
irq = regmap_irq_get_virq(pdata, pirq);
- if (irq < 0) {
- dev_err(pmic->dev,
- "Failed to get parent vIRQ(%d) for chip %s, ret:%d\n",
- pirq, chip->name, irq);
- return irq;
- }
+ if (irq < 0)
+ return dev_err_probe(pmic->dev, irq, "Failed to get parent vIRQ(%d) for chip %s\n",
+ pirq, chip->name);
return devm_regmap_add_irq_chip(pmic->dev, pmic->regmap, irq, irq_flags,
0, chip, data);
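
dev_err_probe() folds the message and the return value into one statement and, when the error is -EPROBE_DEFER, records the deferral reason at debug level instead of flooding the log. The three-line dev_err()-plus-return idiom reduces to:

#include <linux/device.h>

/* Sketch: dev_err_probe() returns the errno it was given, so it can
 * sit directly in the return statement.
 */
static int example_check_virq(struct device *dev, int virq)
{
	if (virq < 0)
		return dev_err_probe(dev, virq, "failed to map vIRQ\n");

	return virq;
}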
@@ -423,25 +428,19 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
static int bxtwc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- acpi_handle handle;
acpi_status status;
unsigned long long hrv;
struct intel_soc_pmic *pmic;
- handle = ACPI_HANDLE(&pdev->dev);
- status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != BROXTON_PMIC_WC_HRV) {
- dev_err(&pdev->dev, "Invalid PMIC hardware revision: %llu\n",
- hrv);
- return -ENODEV;
- }
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != BROXTON_PMIC_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -450,49 +449,39 @@ static int bxtwc_probe(struct platform_device *pdev)
return ret;
pmic->irq = ret;
- dev_set_drvdata(&pdev->dev, pmic);
- pmic->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pmic);
+ pmic->dev = dev;
- pmic->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ pmic->scu = devm_intel_scu_ipc_dev_get(dev);
if (!pmic->scu)
return -EPROBE_DEFER;
- pmic->regmap = devm_regmap_init(&pdev->dev, NULL, pmic,
- &bxtwc_regmap_config);
- if (IS_ERR(pmic->regmap)) {
- ret = PTR_ERR(pmic->regmap);
- dev_err(&pdev->dev, "Failed to initialise regmap: %d\n", ret);
- return ret;
- }
+ pmic->regmap = devm_regmap_init(dev, NULL, pmic, &bxtwc_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return dev_err_probe(dev, PTR_ERR(pmic->regmap), "Failed to initialise regmap\n");
- ret = devm_regmap_add_irq_chip(&pdev->dev, pmic->regmap, pmic->irq,
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
IRQF_ONESHOT | IRQF_SHARED,
0, &bxtwc_regmap_irq_chip,
&pmic->irq_chip_data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_PWRBTN_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_pwrbtn,
&pmic->irq_chip_data_pwrbtn);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
&pmic->irq_chip_data_tmu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n");
/* Add chained IRQ handler for BCU IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -500,12 +489,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_bcu,
&pmic->irq_chip_data_bcu);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add BUC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add BUC IRQ chip\n");
/* Add chained IRQ handler for ADC IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -513,12 +498,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_adc,
&pmic->irq_chip_data_adc);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add ADC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n");
/* Add chained IRQ handler for CHGR IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -526,12 +507,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_chgr,
&pmic->irq_chip_data_chgr);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CHGR IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n");
/* Add chained IRQ handler for CRIT IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -539,54 +516,33 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_crit,
&pmic->irq_chip_data_crit);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CRIT IRQ chip\n");
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CRIT IRQ chip\n");
- return ret;
- }
-
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
- ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add devices\n");
- return ret;
- }
-
- ret = sysfs_create_group(&pdev->dev.kobj, &bxtwc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n", ret);
- return ret;
- }
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, bxt_wc_dev, ARRAY_SIZE(bxt_wc_dev),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devices\n");
/*
- * There is known hw bug. Upon reset BIT 5 of register
+ * There is a known H/W bug. Upon reset, BIT 5 of register
* BXTWC_CHGR_LVL1_IRQ is 0, which is the expected value. However,
* later it's set to 1 (masked) automatically by hardware. So we
- * have the software workaround here to unmaksed it in order to let
- * charger interrutp work.
+ * place the software workaround here to unmask it again in order
+ * to re-enable the charger interrupt.
*/
- regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1,
- BXTWC_MIRQLVL1_MCHGR, 0);
-
- return 0;
-}
-
-static int bxtwc_remove(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &bxtwc_group);
+ regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1, BXTWC_MIRQLVL1_MCHGR, 0);
return 0;
}
static void bxtwc_shutdown(struct platform_device *pdev)
{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev);
+ struct intel_soc_pmic *pmic = platform_get_drvdata(pdev);
disable_irq(pmic->irq);
}
-#ifdef CONFIG_PM_SLEEP
static int bxtwc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -603,8 +559,8 @@ static int bxtwc_resume(struct device *dev)
enable_irq(pmic->irq);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
static const struct acpi_device_id bxtwc_acpi_ids[] = {
{ "INT34D3", },
@@ -614,16 +570,16 @@ MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
static struct platform_driver bxtwc_driver = {
.probe = bxtwc_probe,
- .remove = bxtwc_remove,
.shutdown = bxtwc_shutdown,
.driver = {
.name = "BXTWC PMIC",
- .pm = &bxtwc_pm_ops,
- .acpi_match_table = ACPI_PTR(bxtwc_acpi_ids),
+ .pm = pm_sleep_ptr(&bxtwc_pm_ops),
+ .acpi_match_table = bxtwc_acpi_ids,
+ .dev_groups = bxtwc_groups,
},
};
module_platform_driver(bxtwc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Qipeng Zha<qipeng.zha@intel.com>");
+MODULE_AUTHOR("Qipeng Zha <qipeng.zha@intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 4eab191e053a..9216f0d34206 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -179,18 +179,13 @@ static int cht_wc_probe(struct i2c_client *client)
int ret;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != CHT_WC_HRV) {
- dev_err(dev, "Invalid PMIC hardware revision: %llu\n", hrv);
- return -ENODEV;
- }
- if (client->irq < 0) {
- dev_err(dev, "Invalid IRQ\n");
- return -EINVAL;
- }
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != CHT_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
+
+ if (client->irq < 0)
+ return dev_err_probe(dev, -EINVAL, "Invalid IRQ\n");
pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
@@ -227,7 +222,7 @@ static void cht_wc_shutdown(struct i2c_client *client)
disable_irq(pmic->irq);
}
-static int __maybe_unused cht_wc_suspend(struct device *dev)
+static int cht_wc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -236,7 +231,7 @@ static int __maybe_unused cht_wc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused cht_wc_resume(struct device *dev)
+static int cht_wc_resume(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -244,7 +239,7 @@ static int __maybe_unused cht_wc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
static const struct i2c_device_id cht_wc_i2c_id[] = {
{ }
@@ -258,7 +253,7 @@ static const struct acpi_device_id cht_wc_acpi_ids[] = {
static struct i2c_driver cht_wc_driver = {
.driver = {
.name = "CHT Whiskey Cove PMIC",
- .pm = &cht_wc_pm_ops,
+ .pm = pm_sleep_ptr(&cht_wc_pm_ops),
.acpi_match_table = cht_wc_acpi_ids,
},
.probe_new = cht_wc_probe,
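
The PM conversion in both Whiskey Cove drivers follows the same recipe: DEFINE_SIMPLE_DEV_PM_OPS() always defines the ops object, and pm_sleep_ptr() turns the reference into NULL when CONFIG_PM_SLEEP is off, letting the compiler drop the callbacks as dead code with no #ifdef or __maybe_unused required. Sketch:

#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
				example_suspend, example_resume);

/* In the driver struct:
 *	.driver.pm = pm_sleep_ptr(&example_pm_ops),
 * compiles to NULL without CONFIG_PM_SLEEP, so the callbacks above are
 * discarded without any annotation.
 */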
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index fec2096474ad..a6661e07035b 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -419,9 +419,11 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
of_node_put(fps_child);
+ of_node_put(fps_np);
return ret;
}
}
+ of_node_put(fps_np);
config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
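
The max77620 fix applies the device_node refcount rule: a node obtained from of_get_child_by_name() must be released with of_node_put() on every exit path, and a node held by a for_each_child_of_node() iterator must be put explicitly when the loop is abandoned early. A sketch with a hypothetical per-child configure step:

#include <linux/of.h>

static int example_config_child(struct device_node *child)
{
	return 0;	/* hypothetical per-child setup */
}

/* Sketch: both the error path and the normal path drop the "fps"
 * reference; the early loop exit also drops the iterator's hold on
 * the current child.
 */
static int example_walk_fps(struct device_node *parent)
{
	struct device_node *np, *child;
	int ret;

	np = of_get_child_by_name(parent, "fps");
	if (!np)
		return 0;

	for_each_child_of_node(np, child) {
		ret = example_config_child(child);
		if (ret) {
			of_node_put(child);
			of_node_put(np);
			return ret;
		}
	}
	of_node_put(np);

	return 0;
}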
diff --git a/drivers/mfd/max77714.c b/drivers/mfd/max77714.c
index d1e4247800d2..143a432ea343 100644
--- a/drivers/mfd/max77714.c
+++ b/drivers/mfd/max77714.c
@@ -3,7 +3,7 @@
* Maxim MAX77714 Core Driver
*
* Copyright (C) 2022 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -148,5 +148,5 @@ static struct i2c_driver max77714_driver = {
module_i2c_driver(max77714_driver);
MODULE_DESCRIPTION("Maxim MAX77714 MFD core driver");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index ea5e452510eb..389756436af6 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -3,6 +3,8 @@
// Copyright (c) 2020 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/mfd/mt6357/core.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/core.h>
@@ -17,6 +19,17 @@
#define MTK_PMIC_REG_WIDTH 16
+static const struct irq_top_t mt6357_ints[] = {
+ MT6357_TOP_GEN(BUCK),
+ MT6357_TOP_GEN(LDO),
+ MT6357_TOP_GEN(PSC),
+ MT6357_TOP_GEN(SCK),
+ MT6357_TOP_GEN(BM),
+ MT6357_TOP_GEN(HK),
+ MT6357_TOP_GEN(AUD),
+ MT6357_TOP_GEN(MISC),
+};
+
static const struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(BUCK),
MT6358_TOP_GEN(LDO),
@@ -39,6 +52,13 @@ static const struct irq_top_t mt6359_ints[] = {
MT6359_TOP_GEN(MISC),
};
+static struct pmic_irq_data mt6357_irqd = {
+ .num_top = ARRAY_SIZE(mt6357_ints),
+ .num_pmic_irqs = MT6357_IRQ_NR,
+ .top_int_status_reg = MT6357_TOP_INT_STATUS0,
+ .pmic_ints = mt6357_ints,
+};
+
static struct pmic_irq_data mt6358_irqd = {
.num_top = ARRAY_SIZE(mt6358_ints),
.num_pmic_irqs = MT6358_IRQ_NR,
@@ -211,6 +231,10 @@ int mt6358_irq_init(struct mt6397_chip *chip)
struct pmic_irq_data *irqd;
switch (chip->chip_id) {
+ case MT6357_CHIP_ID:
+ chip->irq_data = &mt6357_irqd;
+ break;
+
case MT6358_CHIP_ID:
case MT6366_CHIP_ID:
chip->irq_data = &mt6358_irqd;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 1a368ad08f58..f6c1f80f94a4 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,10 +12,14 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6359/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/registers.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/registers.h>
@@ -23,6 +27,12 @@
#define MT6323_RTC_BASE 0x8000
#define MT6323_RTC_SIZE 0x40
+#define MT6357_RTC_BASE 0x0588
+#define MT6357_RTC_SIZE 0x3c
+
+#define MT6331_RTC_BASE 0x4000
+#define MT6331_RTC_SIZE 0x40
+
#define MT6358_RTC_BASE 0x0588
#define MT6358_RTC_SIZE 0x3c
@@ -37,6 +47,16 @@ static const struct resource mt6323_rtc_resources[] = {
DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
};
+static const struct resource mt6357_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6357_RTC_BASE, MT6357_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6357_IRQ_RTC),
+};
+
+static const struct resource mt6331_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6331_RTC_BASE, MT6331_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6331_IRQ_STATUS_RTC),
+};
+
static const struct resource mt6358_rtc_resources[] = {
DEFINE_RES_MEM(MT6358_RTC_BASE, MT6358_RTC_SIZE),
DEFINE_RES_IRQ(MT6358_IRQ_RTC),
@@ -66,6 +86,18 @@ static const struct resource mt6323_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
};
+static const struct resource mt6357_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY, "homekey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY_R, "powerkey_r"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY_R, "homekey_r"),
+};
+
+static const struct resource mt6331_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_HOMEKEY, "homekey"),
+};
+
static const struct resource mt6397_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_PWRKEY, "powerkey"),
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_HOMEKEY, "homekey"),
@@ -100,6 +132,43 @@ static const struct mfd_cell mt6323_devs[] = {
},
};
+static const struct mfd_cell mt6357_devs[] = {
+ {
+ .name = "mt6357-regulator",
+ }, {
+ .name = "mt6357-rtc",
+ .num_resources = ARRAY_SIZE(mt6357_rtc_resources),
+ .resources = mt6357_rtc_resources,
+ .of_compatible = "mediatek,mt6357-rtc",
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6357_keys_resources),
+ .resources = mt6357_keys_resources,
+ .of_compatible = "mediatek,mt6357-keys"
+ },
+};
+
+/* MT6331 is always used in combination with MT6332 */
+static const struct mfd_cell mt6331_mt6332_devs[] = {
+ {
+ .name = "mt6331-rtc",
+ .num_resources = ARRAY_SIZE(mt6331_rtc_resources),
+ .resources = mt6331_rtc_resources,
+ .of_compatible = "mediatek,mt6331-rtc",
+ }, {
+ .name = "mt6331-regulator",
+ .of_compatible = "mediatek,mt6331-regulator"
+ }, {
+ .name = "mt6332-regulator",
+ .of_compatible = "mediatek,mt6332-regulator"
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6331_keys_resources),
+ .resources = mt6331_keys_resources,
+ .of_compatible = "mediatek,mt6331-keys"
+ },
+};
+
static const struct mfd_cell mt6358_devs[] = {
{
.name = "mt6358-regulator",
@@ -179,6 +248,22 @@ static const struct chip_data mt6323_core = {
.irq_init = mt6397_irq_init,
};
+static const struct chip_data mt6357_core = {
+ .cid_addr = MT6357_SWCID,
+ .cid_shift = 8,
+ .cells = mt6357_devs,
+ .cell_size = ARRAY_SIZE(mt6357_devs),
+ .irq_init = mt6358_irq_init,
+};
+
+static const struct chip_data mt6331_mt6332_core = {
+ .cid_addr = MT6331_HWCID,
+ .cid_shift = 0,
+ .cells = mt6331_mt6332_devs,
+ .cell_size = ARRAY_SIZE(mt6331_mt6332_devs),
+ .irq_init = mt6397_irq_init,
+};
+
static const struct chip_data mt6358_core = {
.cid_addr = MT6358_SWCID,
.cid_shift = 8,
@@ -262,6 +347,12 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6323",
.data = &mt6323_core,
}, {
+ .compatible = "mediatek,mt6331",
+ .data = &mt6331_mt6332_core,
+ }, {
+ .compatible = "mediatek,mt6357",
+ .data = &mt6357_core,
+ }, {
.compatible = "mediatek,mt6358",
.data = &mt6358_core,
}, {
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 2924919da991..eff53fed8fe7 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -12,6 +12,8 @@
#include <linux/suspend.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -172,7 +174,12 @@ int mt6397_irq_init(struct mt6397_chip *chip)
chip->int_status[0] = MT6323_INT_STATUS0;
chip->int_status[1] = MT6323_INT_STATUS1;
break;
-
+ case MT6331_CHIP_ID:
+ chip->int_con[0] = MT6331_INT_CON0;
+ chip->int_con[1] = MT6331_INT_CON1;
+ chip->int_status[0] = MT6331_INT_STATUS_CON0;
+ chip->int_status[1] = MT6331_INT_STATUS_CON1;
+ break;
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
chip->int_con[0] = MT6397_INT_CON0;
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index c472d7f8103c..4b8ff947762f 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -54,13 +54,6 @@ enum {
#define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE)
-struct pm8008_data {
- struct device *dev;
- struct regmap *regmap;
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)};
static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)};
static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)};
@@ -150,7 +143,7 @@ static struct regmap_config qcom_mfd_regmap_cfg = {
.max_register = 0xFFFF,
};
-static int pm8008_init(struct pm8008_data *chip)
+static int pm8008_init(struct regmap *regmap)
{
int rc;
@@ -160,34 +153,31 @@ static int pm8008_init(struct pm8008_data *chip)
* This is required to enable the writing of TYPE registers in
* regmap_irq_sync_unlock().
*/
- rc = regmap_write(chip->regmap,
- (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET),
- BIT(0));
+ rc = regmap_write(regmap, (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
/* Do the same for GPIO1 and GPIO2 peripherals */
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
return rc;
}
-static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
+static int pm8008_probe_irq_peripherals(struct device *dev,
+ struct regmap *regmap,
int client_irq)
{
int rc, i;
struct regmap_irq_type *type;
struct regmap_irq_chip_data *irq_data;
- rc = pm8008_init(chip);
+ rc = pm8008_init(regmap);
if (rc) {
- dev_err(chip->dev, "Init failed: %d\n", rc);
+ dev_err(dev, "Init failed: %d\n", rc);
return rc;
}
@@ -207,10 +197,10 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
}
- rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq,
+ rc = devm_regmap_add_irq_chip(dev, regmap, client_irq,
IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
if (rc) {
- dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc);
+ dev_err(dev, "Failed to add IRQ chip: %d\n", rc);
return rc;
}
@@ -220,26 +210,23 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
static int pm8008_probe(struct i2c_client *client)
{
int rc;
- struct pm8008_data *chip;
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
+ struct device *dev;
+ struct regmap *regmap;
- chip->dev = &client->dev;
- chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
- if (!chip->regmap)
+ dev = &client->dev;
+ regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+ if (!regmap)
return -ENODEV;
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, regmap);
- if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) {
- rc = pm8008_probe_irq_peripherals(chip, client->irq);
+ if (of_property_read_bool(dev->of_node, "interrupt-controller")) {
+ rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq);
if (rc)
- dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc);
+ dev_err(dev, "Failed to probe irq periphs: %d\n", rc);
}
- return devm_of_platform_populate(chip->dev);
+ return devm_of_platform_populate(dev);
}
static const struct of_device_id pm8008_match[] = {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 191fdb87c424..bdb2ce7ff03b 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -101,8 +101,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
}
}
- syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
- (u64)res.start);
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
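
The syscon change leans on the %pa printk extension, which prints a phys_addr_t/resource_size_t at the correct width on both 32- and 64-bit kernels and takes a pointer to the value, so the (u64) cast disappears:

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Sketch: %pa dereferences its argument; pass &res->start, never the
 * value itself.
 */
static char *example_regmap_name(struct device_node *np,
				 struct resource *res)
{
	return kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res->start);
}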
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 5369c67e3280..663ffd4b8570 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -397,11 +397,8 @@ err_noirq:
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- int ret;
- ret = pdata->disable(dev);
clk_disable_unprepare(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
clk_disable_unprepare(t7l66xb->clk32k);
@@ -412,8 +409,7 @@ static int t7l66xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
kfree(t7l66xb);
- return ret;
-
+ return 0;
}
static struct platform_driver t7l66xb_platform_driver = {
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 0be5731685b4..aa903a31dd43 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -798,20 +798,19 @@ static int tc6393xb_remove(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- int ret;
mfd_remove_devices(&dev->dev);
tc6393xb_detach_irq(dev);
- ret = tcpd->disable(dev);
+ tcpd->disable(dev);
clk_disable_unprepare(tc6393xb->clk);
iounmap(tc6393xb->scr);
release_resource(&tc6393xb->rscr);
clk_put(tc6393xb->clk);
kfree(tc6393xb);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bd6659cf3bc0..2cb9326f3e61 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -656,309 +656,6 @@ static inline struct device *add_child(unsigned mod_no, const char *name,
can_wakeup, irq0, irq1);
}
-static struct device *
-add_regulator_linked(int num, struct regulator_init_data *pdata,
- struct regulator_consumer_supply *consumers,
- unsigned num_consumers, unsigned long features)
-{
- struct twl_regulator_driver_data drv_data;
-
- /* regulator framework demands init_data ... */
- if (!pdata)
- return NULL;
-
- if (consumers) {
- pdata->consumer_supplies = consumers;
- pdata->num_consumer_supplies = num_consumers;
- }
-
- if (pdata->driver_data) {
- /* If we have existing drv_data, just add the flags */
- struct twl_regulator_driver_data *tmp;
- tmp = pdata->driver_data;
- tmp->features |= features;
- } else {
- /* add new driver data struct, used only during init */
- drv_data.features = features;
- drv_data.set_voltage = NULL;
- drv_data.get_voltage = NULL;
- drv_data.data = NULL;
- pdata->driver_data = &drv_data;
- }
-
- /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num,
- pdata, sizeof(*pdata), false, 0, 0);
-}
-
-static struct device *
-add_regulator(int num, struct regulator_init_data *pdata,
- unsigned long features)
-{
- return add_regulator_linked(num, pdata, NULL, 0, features);
-}
-
-/*
- * NOTE: We know the first 8 IRQs after pdata->base_irq are
- * for the PIH, and the next are for the PWR_INT SIH, since
- * that's how twl_init_irq() sets things up.
- */
-
-static int
-add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
- unsigned long features)
-{
- struct device *child;
-
- if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) {
- child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio",
- pdata->gpio, sizeof(*pdata->gpio),
- false, irq_base + GPIO_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) {
- child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad",
- pdata->keypad, sizeof(*pdata->keypad),
- true, irq_base + KEYPAD_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_MADC, "twl4030_madc",
- pdata->madc, sizeof(*pdata->madc),
- true, irq_base + MADC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_RTC_DRV_TWL4030)) {
- /*
- * REVISIT platform_data here currently might expose the
- * "msecure" line ... but for now we just expect board
- * setup to tell the chip "it's always ok to SET_TIME".
- * Eventually, Linux might become more aware of such
- * HW security concerns, and "least privilege".
- */
- child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0,
- true, irq_base + RTC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL)) {
- child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
- child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_USB) && pdata->usb &&
- twl_class_is_4030()) {
-
- static struct regulator_consumer_supply usb1v5 = {
- .supply = "usb1v5",
- };
- static struct regulator_consumer_supply usb1v8 = {
- .supply = "usb1v8",
- };
- static struct regulator_consumer_supply usb3v1 = {
- .supply = "usb3v1",
- };
-
- /* First add the regulators so that they can be used by transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030)) {
- /* this is a template that gets copied */
- struct regulator_init_data usb_fixed = {
- .constraints.valid_modes_mask =
- REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .constraints.valid_ops_mask =
- REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- };
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V5,
- &usb_fixed, &usb1v5, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V8,
- &usb_fixed, &usb1v8, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB3V1,
- &usb_fixed, &usb3v1, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- }
-
- child = add_child(TWL_MODULE_USB, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb), true,
- /* irq0 = USB_PRES, irq1 = USB */
- irq_base + USB_PRES_INTR_OFFSET,
- irq_base + USB_INTR_OFFSET);
-
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- /* we need to connect regulators to this transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) {
- usb1v5.dev_name = dev_name(child);
- usb1v8.dev_name = dev_name(child);
- usb3v1.dev_name = dev_name(child);
- }
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
- 0, false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio",
- pdata->audio, sizeof(*pdata->audio),
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* twl4030 regulators */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VIO, pdata->vio,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDAC, pdata->vdac,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator((features & TWL4030_VAUX2)
- ? TWL4030_REG_VAUX2_4030
- : TWL4030_REG_VAUX2,
- pdata->vaux2, features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* maybe add LDOs that are omitted on cost-reduced parts */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && !(features & TPS_SUBSET)
- && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VSIM, pdata->vsim,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
- !(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
- pdata->bci, sizeof(*pdata->bci), false,
- /* irq0 = CHG_PRES, irq1 = BCI */
- irq_base + BCI_PRES_INTR_OFFSET,
- irq_base + BCI_INTR_OFFSET);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_power",
- pdata->power, sizeof(*pdata->power), false,
- 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- return 0;
-}
-
/*----------------------------------------------------------------------*/
/*
@@ -987,8 +684,7 @@ static inline int unprotect_pm_master(void)
return e;
}
-static void clocks_init(struct device *dev,
- struct twl4030_clock_init_data *clock)
+static void clocks_init(struct device *dev)
{
int e = 0;
struct clk *osc;
@@ -1018,8 +714,6 @@ static void clocks_init(struct device *dev,
}
ctrl |= HIGH_PERF_SQ;
- if (clock && clock->ck32k_lowpwr_enable)
- ctrl |= CK32K_LOWPWR_EN;
e |= unprotect_pm_master();
/* effect->MADC+USB ck en */
@@ -1063,7 +757,6 @@ static struct of_dev_auxdata twl_auxdata_lookup[] = {
static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
const struct regmap_config *twl_regmap_config;
@@ -1071,7 +764,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i, num_slaves;
- if (!node && !pdata) {
+ if (!node) {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
@@ -1161,7 +854,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_priv->ready = true;
/* setup clock framework */
- clocks_init(&client->dev, pdata ? pdata->clock : NULL);
+ clocks_init(&client->dev);
/* read TWL IDCODE Register */
if (twl_class_is_4030()) {
@@ -1209,14 +902,8 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
TWL4030_DCDC_GLOBAL_CFG);
}
- if (node) {
- if (pdata)
- twl_auxdata_lookup[0].platform_data = pdata->gpio;
- status = of_platform_populate(node, NULL, twl_auxdata_lookup,
- &client->dev);
- } else {
- status = add_children(pdata, irq_base, id->driver_data);
- }
+ status = of_platform_populate(node, NULL, twl_auxdata_lookup,
+ &client->dev);
fail:
if (status < 0)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 93ebd174d848..5d9e3483b89d 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -25,7 +25,7 @@
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
-#define FASTRPC_MAX_SESSIONS 13 /*12 compute, 1 cpz*/
+#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
@@ -1943,7 +1943,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
- sess = &cctx->session[cctx->sesscount];
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
+ dev_err(&pdev->dev, "too many sessions\n");
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ return -ENOSPC;
+ }
+ sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
@@ -1956,13 +1961,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
- dup_sess = &cctx->session[cctx->sesscount];
+ dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
- cctx->sesscount++;
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
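
The fastrpc fix moves the capacity check ahead of handing out a slot and makes the post-increment part of the claim itself, so no code path can index the session array at or beyond FASTRPC_MAX_SESSIONS. The shape of the fix, reduced to a self-contained sketch with placeholder names:

#include <linux/spinlock.h>

#define EXAMPLE_MAX_SLOTS	14

static DEFINE_SPINLOCK(example_lock);
static int example_slots[EXAMPLE_MAX_SLOTS];
static int example_count;

/* Sketch: check capacity, then claim exactly one slot while still
 * holding the lock; a NULL return means the table is full.
 */
static int *example_claim_slot(void)
{
	unsigned long flags;
	int *slot = NULL;

	spin_lock_irqsave(&example_lock, flags);
	if (example_count < EXAMPLE_MAX_SLOTS)
		slot = &example_slots[example_count++];
	spin_unlock_irqrestore(&example_lock, flags);

	return slot;
}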
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 85dd6aa33df6..61a2be712bf7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1585,7 +1585,7 @@ static int vmballoon_register_shrinker(struct vmballoon *b)
b->shrinker.count_objects = vmballoon_shrinker_count;
b->shrinker.seeks = DEFAULT_SEEKS;
- r = register_shrinker(&b->shrinker);
+ r = register_shrinker(&b->shrinker, "vmw-balloon");
if (r == 0)
b->shrinker_registered = true;
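
register_shrinker() grew a printf-style name argument, used to label the shrinker in debugfs and tracing output, so every caller now passes an identifier alongside the struct. Sketch, with hypothetical count/scan callbacks:

#include <linux/shrinker.h>

static unsigned long example_count_objects(struct shrinker *s,
					   struct shrink_control *sc);
static unsigned long example_scan_objects(struct shrinker *s,
					  struct shrink_control *sc);

static struct shrinker example_shrinker = {
	.count_objects = example_count_objects,
	.scan_objects = example_scan_objects,
	.seeks = DEFAULT_SEEKS,
};

static int example_register(void)
{
	/* the name labels this shrinker in debugfs and trace output */
	return register_shrinker(&example_shrinker, "example-cache");
}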
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index cee4c0b59f43..06aa62ce0ed1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -949,16 +949,17 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
-
- /*
- * Fetch switch information from card.
- */
- err = mmc_read_switch(card);
- if (err)
- return err;
}
/*
+ * Fetch switch information from card. Note, sd3_bus_mode can change if
+ * voltage switch outcome changes, so do this always.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
@@ -1480,26 +1481,15 @@ retry:
if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
- /*
- * Re-read switch information in case it has changed since
- * oldcard was initialized.
- */
- if (oldcard) {
- err = mmc_read_switch(card);
- if (err)
- goto free_card;
- }
- if (mmc_sd_card_using_v18(card)) {
- if (mmc_host_set_uhs_voltage(host) ||
- mmc_sd_init_uhs_card(card)) {
- v18_fixup_failed = true;
- mmc_power_cycle(host, ocr);
- if (!oldcard)
- mmc_remove_card(card);
- goto retry;
- }
- goto done;
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
}
+ goto cont;
}
/* Initialization sequence for UHS-I cards */
@@ -1534,7 +1524,7 @@ retry:
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+cont:
if (!oldcard) {
/* Read/parse the extension registers. */
err = sd_read_ext_regs(card);
@@ -1566,7 +1556,7 @@ retry:
err = -EINVAL;
goto free_card;
}
-done:
+
host->card = card;
return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 10c563999d3d..e63608834411 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -171,6 +171,7 @@ config MMC_SDHCI_OF_ASPEED
config MMC_SDHCI_OF_ASPEED_TEST
bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2f08d442e557..fc462995cf94 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4ff73d1883de..69d78604d1fc 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2446,6 +2446,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2932,11 +2935,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
/*
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0db9490dc659..e4003f6058eb 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 4e904850973c..a7343d4bc50e 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -349,6 +349,15 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
.ops = &sdhci_dwcmshc_rk35xx_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -431,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
#endif
@@ -447,7 +459,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
int err;
u32 extra;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 134e27328597..25bad4318305 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -112,6 +112,13 @@ static const struct of_device_id dataflash_dt_ids[] = {
MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
+static const struct spi_device_id dataflash_spi_ids[] = {
+ { .name = "at45", },
+ { .name = "dataflash", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, dataflash_spi_ids);
+
/* ......................................................................... */
/*
@@ -936,6 +943,7 @@ static struct spi_driver dataflash_driver = {
.probe = dataflash_probe,
.remove = dataflash_remove,
+ .id_table = dataflash_spi_ids,
/* FIXME: investigate suspend and resume... */
};
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 6950a8764815..36e060386e59 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -270,7 +270,9 @@ static int powernv_flash_release(struct platform_device *pdev)
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
- return mtd_device_unregister(&(data->mtd));
+ WARN_ON(mtd_device_unregister(&data->mtd));
+
+ return 0;
}
static const struct of_device_id powernv_flash_match[] = {
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 24073518587f..f58742486d3d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -1045,13 +1045,9 @@ static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
- int ret, i;
+ int i;
dev = platform_get_drvdata(pdev);
- if (!dev) {
- dev_err(&pdev->dev, "dev is null\n");
- return -ENODEV;
- }
/* clean up for all nor flash */
for (i = 0; i < dev->num_flashes; i++) {
@@ -1060,9 +1056,7 @@ static int spear_smi_remove(struct platform_device *pdev)
continue;
/* clean up mtd stuff */
- ret = mtd_device_unregister(&flash->mtd);
- if (ret)
- dev_err(&pdev->dev, "error removing mtd\n");
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
clk_disable_unprepare(dev->clk);
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index d3377b10fc0f..54861d889c30 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2084,15 +2084,12 @@ static int stfsm_probe(struct platform_device *pdev)
* Configure READ/WRITE/ERASE sequences according to platform and
* device flags.
*/
- if (info->config) {
+ if (info->config)
ret = info->config(fsm);
- if (ret)
- goto err_clk_unprepare;
- } else {
+ else
ret = stfsm_prepare_rwe_seqs_default(fsm);
- if (ret)
- goto err_clk_unprepare;
- }
+ if (ret)
+ goto err_clk_unprepare;
fsm->mtd.name = info->name;
fsm->mtd.dev.parent = &pdev->dev;
@@ -2115,10 +2112,12 @@ static int stfsm_probe(struct platform_device *pdev)
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
- return mtd_device_register(&fsm->mtd, NULL, 0);
-
+ ret = mtd_device_register(&fsm->mtd, NULL, 0);
+ if (ret) {
err_clk_unprepare:
- clk_disable_unprepare(fsm->clk);
+ clk_disable_unprepare(fsm->clk);
+ }
+
return ret;
}
@@ -2126,9 +2125,11 @@ static int stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
+ WARN_ON(mtd_device_unregister(&fsm->mtd));
+
clk_disable_unprepare(fsm->clk);
- return mtd_device_unregister(&fsm->mtd);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index a3439b791eeb..a6161ce340d4 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -233,16 +233,16 @@ static int am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
- int ret;
- ret = hyperbus_unregister_device(&priv->hbdev);
+ hyperbus_unregister_device(&priv->hbdev);
+
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
- return ret;
+ return 0;
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 2f9fc4e17d53..4d8047d43e48 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -126,16 +126,12 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
}
EXPORT_SYMBOL_GPL(hyperbus_register_device);
-int hyperbus_unregister_device(struct hyperbus_device *hbdev)
+void hyperbus_unregister_device(struct hyperbus_device *hbdev)
{
- int ret = 0;
-
if (hbdev && hbdev->mtd) {
- ret = mtd_device_unregister(hbdev->mtd);
+ WARN_ON(mtd_device_unregister(hbdev->mtd));
map_destroy(hbdev->mtd);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
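
Several remove paths in this series converge on one convention: mtd_device_unregister() is asserted with WARN_ON() instead of being propagated, since the driver core ignores a .remove() return value anyway and bailing out early would skip the rest of the teardown. A sketch of the pattern, with a hypothetical private struct:

#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>

struct example_priv {		/* hypothetical driver state */
	struct mtd_info mtd;
	struct clk *clk;
};

/* Sketch: warn if unregistering fails, complete the teardown
 * regardless, and always report success to the driver core.
 */
static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	WARN_ON(mtd_device_unregister(&priv->mtd));
	clk_disable_unprepare(priv->clk);

	return 0;
}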
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index 6e08ec1d4f09..d00d30243403 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -134,7 +134,7 @@ static int rpcif_hb_probe(struct platform_device *pdev)
error = rpcif_hw_init(&hyperbus->rpc, true);
if (error)
- return error;
+ goto out_disable_rpm;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
@@ -145,19 +145,24 @@ static int rpcif_hb_probe(struct platform_device *pdev)
hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
error = hyperbus_register_device(&hyperbus->hbdev);
if (error)
- rpcif_disable_rpm(&hyperbus->rpc);
+ goto out_disable_rpm;
+
+ return 0;
+out_disable_rpm:
+ rpcif_disable_rpm(&hyperbus->rpc);
return error;
}
static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
- int error = hyperbus_unregister_device(&hyperbus->hbdev);
+
+ hyperbus_unregister_device(&hyperbus->hbdev);
rpcif_disable_rpm(&hyperbus->rpc);
- return error;
+ return 0;
}
static struct platform_driver rpcif_platform_driver = {
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 72f5c7b30079..367e2d906de0 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -478,7 +478,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
*/
static int lpddr2_nvm_remove(struct platform_device *pdev)
{
- return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+ WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
+
+ return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 4f63b8430c71..85eca6a192e6 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -66,18 +66,12 @@ static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
- int i, err = 0;
+ int i;
info = platform_get_drvdata(dev);
- if (!info) {
- err = -EINVAL;
- goto out;
- }
if (info->cmtd) {
- err = mtd_device_unregister(info->cmtd);
- if (err)
- goto out;
+ WARN_ON(mtd_device_unregister(info->cmtd));
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
@@ -92,10 +86,9 @@ static int physmap_flash_remove(struct platform_device *dev)
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
-out:
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
- return err;
+ return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index ad7cd9cfaee0..a1b8b7b25f88 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -93,6 +93,7 @@ static int ap_flash_init(struct platform_device *pdev)
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
@@ -207,6 +208,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
versatile_flashprot = (enum versatile_flashprot)devid->data;
rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
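
Both hunks above fix the same leak class: of_iomap() and syscon_node_to_regmap() only borrow the node for the duration of the call, so the reference taken by the earlier lookup must be dropped once the node pointer is no longer dereferenced. The shape of the fix, sketched with a made-up compatible string:

struct device_node *np;
void __iomem *base;

np = of_find_compatible_node(NULL, NULL, "vendor,example-ebi");
if (!np)
	return -ENODEV;

base = of_iomap(np, 0);
of_node_put(np);	/* drop the lookup reference right away */
if (!base)
	return -ENODEV;
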
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index d0f9c4b0285c..05860288a7af 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -615,21 +615,24 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!usr_oob)
req.ooblen = 0;
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
if (req.start + req.len > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
- datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf)
return -ENOMEM;
}
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
- oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
- kfree(datbuf);
+ kvfree(datbuf);
return -ENOMEM;
}
}
@@ -679,8 +682,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_oob += ops.oobretlen;
}
- kfree(datbuf);
- kfree(oobbuf);
+ kvfree(datbuf);
+ kvfree(oobbuf);
return ret;
}
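
The allocation switch matters because the request length is capped at mtd->erasesize, which can reach a megabyte on large-block NAND; kmalloc() needs physically contiguous pages and may fail there under fragmentation. kvmalloc() tries kmalloc() first and falls back to vmalloc(), and the two paths must stay paired, which is why every kfree() above becomes kvfree(). A condensed sketch:

buf = kvmalloc(len, GFP_KERNEL);	/* contiguous if cheap, vmalloc otherwise */
if (!buf)
	return -ENOMEM;
/* ... fill and use buf ... */
kvfree(buf);	/* correct for either allocation path; plain kfree() is not */
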
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9eb0680db312..a9b8be9f40dc 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,68 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
return 0;
}
+static void mtd_check_of_node(struct mtd_info *mtd)
+{
+ struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
+ const char *pname, *prefix = "partition-";
+ int plen, mtd_name_len, offset, prefix_len;
+ struct mtd_info *parent;
+ bool found = false;
+
+ /* Check if MTD already has a device node */
+ if (dev_of_node(&mtd->dev))
+ return;
+
+ /* Check if a partitions node exists */
+ if (!mtd_is_partition(mtd))
+ return;
+ parent = mtd->parent;
+ parent_dn = dev_of_node(&parent->dev);
+ if (!parent_dn)
+ return;
+
+ partitions = of_get_child_by_name(parent_dn, "partitions");
+ if (!partitions)
+ goto exit_parent;
+
+ prefix_len = strlen(prefix);
+ mtd_name_len = strlen(mtd->name);
+
+ /* Search if a partition is defined with the same name */
+ for_each_child_of_node(partitions, mtd_dn) {
+ offset = 0;
+
+ /* Skip partition with no/wrong prefix */
+ if (!of_node_name_prefix(mtd_dn, "partition-"))
+ continue;
+
+ /* The label has priority. Check that first */
+ if (of_property_read_string(mtd_dn, "label", &pname)) {
+ of_property_read_string(mtd_dn, "name", &pname);
+ offset = prefix_len;
+ }
+
+ plen = strlen(pname) - offset;
+ if (plen == mtd_name_len &&
+ !strncmp(mtd->name, pname + offset, plen)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto exit_partitions;
+
+ /* Set of_node only for nvmem */
+ if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
+ mtd_set_of_node(mtd, mtd_dn);
+
+exit_partitions:
+ of_node_put(partitions);
+exit_parent:
+ of_node_put(parent_dn);
+}
+
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
@@ -658,6 +720,7 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
+ mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error)
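
mtd_check_of_node() matches a partition to a child of the parent's "partitions" node either by the child's "label" property or, failing that, by the node name with the "partition-" prefix stripped. A small user-space sketch of that matching rule (node names and labels below are hypothetical):

#include <stdio.h>
#include <string.h>

struct node { const char *name; const char *label; };

static int matches(const struct node *n, const char *mtd_name)
{
	const char *prefix = "partition-";
	const char *pname = n->label;	/* the label has priority */
	size_t offset = 0;

	if (strncmp(n->name, prefix, strlen(prefix)))
		return 0;	/* skip nodes without the prefix */
	if (!pname) {
		pname = n->name;	/* else strip "partition-" from the name */
		offset = strlen(prefix);
	}
	return strcmp(pname + offset, mtd_name) == 0;
}

int main(void)
{
	struct node a = { "partition-art", NULL };
	struct node b = { "partition-fs", "rootfs" };

	printf("%d %d\n", matches(&a, "rootfs"), matches(&b, "rootfs")); /* 0 1 */
	return 0;
}
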
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 53bd10738418..296fb16c8dc3 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -347,17 +347,17 @@ static int anfc_select_target(struct nand_chip *chip, int target)
/* Update clock frequency */
if (nfc->cur_clk != anand->clk) {
- clk_disable_unprepare(nfc->controller_clk);
- ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
if (ret) {
dev_err(nfc->dev, "Failed to change clock rate\n");
return ret;
}
- ret = clk_prepare_enable(nfc->controller_clk);
+ ret = clk_prepare_enable(nfc->bus_clk);
if (ret) {
dev_err(nfc->dev,
- "Failed to re-enable the controller clock\n");
+ "Failed to re-enable the bus clock\n");
return ret;
}
@@ -1043,7 +1043,13 @@ static int anfc_setup_interface(struct nand_chip *chip, int target,
DQS_BUFF_SEL_OUT(dqs_mode);
}
- anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
+ }
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
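
ONFI NV-DDR timings express the minimum clock period tCK_min in picoseconds, so the computed target rate is 10^12 / tCK_min Hz, which is exactly what the div_u64() above evaluates. A worked example with an illustrative 20000 ps period:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t tCK_min_ps = 20000;	/* illustrative NV-DDR period */
	uint64_t hz = NSEC_PER_SEC * 1000 / tCK_min_ps;	/* ps -> Hz */

	printf("%llu Hz\n", (unsigned long long)hz);	/* 50000000 */
	return 0;
}
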
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 6ef14442c71a..c9ac3baf68c0 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2629,7 +2629,9 @@ static int atmel_nand_controller_remove(struct platform_device *pdev)
{
struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
- return nc->caps->ops->remove(nc);
+ WARN_ON(nc->caps->ops->remove(nc));
+
+ return 0;
}
static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 9dbf031716a6..af119e376352 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -679,8 +679,10 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
- if (!cafe)
- return -ENOMEM;
+ if (!cafe) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
mtd = nand_to_mtd(&cafe->nand);
mtd->dev.parent = &pdev->dev;
@@ -801,6 +803,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
kfree(cafe);
+ out_disable_device:
+ pci_disable_device(pdev);
out:
return err;
}
@@ -822,6 +826,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
+ pci_disable_device(pdev);
}
static const struct pci_device_id cafe_nand_tbl[] = {
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ac3be92872d0..829b76b303aa 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1293,26 +1293,20 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
-static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
- int ret;
while (!list_empty(&nfc->chips)) {
meson_chip = list_first_entry(&nfc->chips,
struct meson_nfc_nand_chip, node);
mtd = nand_to_mtd(&meson_chip->nand);
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
- meson_nfc_free_buffer(&meson_chip->nand);
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
-
- return 0;
}
static int meson_nfc_nand_chips_init(struct device *dev,
@@ -1445,16 +1439,11 @@ err_clk:
static int meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
- int ret;
- ret = meson_nfc_nand_chip_cleanup(nfc);
- if (ret)
- return ret;
+ meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_disable_clk(nfc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 58c32a11792e..4a9f2b6c772d 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -2278,16 +2278,14 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct omap_nand_info *info = mtd_to_omap(mtd);
- int ret;
rawnand_sw_bch_cleanup(nand_chip);
if (info->dma)
dma_release_channel(info->dma);
- ret = mtd_device_unregister(mtd);
- WARN_ON(ret);
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(nand_chip);
- return ret;
+ return 0;
}
/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 048b255faa76..8f80019a9f01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -80,8 +80,10 @@
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31
@@ -102,6 +104,7 @@
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN 30
/* NAND_DEV_CMD1 bits */
@@ -238,6 +241,9 @@ nandc_set_reg(chip, reg, \
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start position
* for current sgl. It will be used for size calculation
@@ -250,14 +256,14 @@ nandc_set_reg(chip, reg, \
* @rx_sgl_start - start index in data sgl for rx.
* @wait_second_completion - wait for second DMA desc completion before making
* the NAND transfer completion.
- * @txn_done - completion for NAND transfer.
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
@@ -267,25 +273,23 @@ struct bam_transaction {
u32 rx_sgl_pos;
u32 rx_sgl_start;
bool wait_second_completion;
- struct completion txn_done;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
* This data type corresponds to the nand dma descriptor
+ * @dma_desc - low level DMA engine descriptor
* @list - list for desc_info
- * @dir - DMA transfer direction
+ *
* @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
* ADM
* @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
* @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dma_desc - low level DMA engine descriptor
+ * @dir - DMA transfer direction
*/
struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
struct list_head node;
- enum dma_data_direction dir;
union {
struct scatterlist adm_sgl;
struct {
@@ -293,7 +297,7 @@ struct desc_info {
int sgl_cnt;
};
};
- struct dma_async_tx_descriptor *dma_desc;
+ enum dma_data_direction dir;
};
/*
@@ -337,52 +341,64 @@ struct nandc_regs {
/*
* NAND controller data struct
*
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
* @dev: parent device
+ *
* @base: MMIO base
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
+ *
* @core_clk: controller clock
* @aon_clk: another controller clock
*
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
* @chan: dma channel
* @cmd_crci: ADM DMA CRCI for command flow control
* @data_crci: ADM DMA CRCI for data flow control
+ *
* @desc_list: DMA descriptor list (list of desc_infos)
*
* @data_buffer: our local DMA buffer for page read/writes,
* used when we can't use the buffer provided
* by upper layers directly
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
* @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @reg_read_dma: contains dma address for register read buffer
- * @reg_read_pos: marker for data read in reg_read_buf
*
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- * @cmd1/vld: some fixed controller register values
- * @props: properties of current NAND controller,
- * initialized via DT match data
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
* @max_cwperpage: maximum QPIC codewords required. calculated
* from all connected NAND devices pagesize
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
*/
struct qcom_nand_controller {
- struct nand_controller controller;
- struct list_head host_list;
-
struct device *dev;
void __iomem *base;
- phys_addr_t base_phys;
- dma_addr_t base_dma;
struct clk *core_clk;
struct clk *aon_clk;
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller controller;
+ struct list_head host_list;
+
union {
/* will be used only by QPIC for BAM DMA */
struct {
@@ -400,64 +416,89 @@ struct qcom_nand_controller {
};
struct list_head desc_list;
- struct bam_transaction *bam_txn;
u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
int buf_size;
int buf_count;
int buf_start;
unsigned int max_cwperpage;
- __le32 *reg_read_buf;
- dma_addr_t reg_read_dma;
int reg_read_pos;
- struct nandc_regs *regs;
-
u32 cmd1, vld;
- const struct qcom_nandc_props *props;
+};
+
+/*
+ * NAND special boot partitions
+ *
+ * @page_offset: offset of the partition where spare data is not protected
+ * by ECC (value in pages)
+ * @page_size: size of the partition where spare data is not protected
+ * by ECC (value in pages)
+ */
+struct qcom_nand_boot_partition {
+ u32 page_offset;
+ u32 page_size;
};
/*
* NAND chip structure
*
+ * @boot_partitions: array of boot partitions where offset and size of the
+ * boot partitions are stored
+ *
* @chip: base NAND chip structure
* @node: list node to add itself to host_list in
* qcom_nand_controller
*
+ * @nr_boot_partitions: count of the boot partitions where spare data is not
+ * protected by ECC
+ *
* @cs: chip select value for this chip
* @cw_size: the number of bytes in a single step/codeword
* of a page, consisting of all data, ecc, spare
* and reserved bytes
* @cw_data: the number of bytes within a codeword protected
* by ECC
- * @use_ecc: request the controller to use ECC for the
- * upcoming read/write
- * @bch_enabled: flag to tell whether BCH ECC mode is used
* @ecc_bytes_hw: ECC bytes used by controller hardware for this
* chip
- * @status: value to be returned if NAND_CMD_STATUS command
- * is executed
+ *
* @last_command: keeps track of last command on this chip. used
* for reading correct status
*
* @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
* ecc/non-ecc mode for the current nand flash
* device
+ *
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @codeword_fixup: keep track of the current layout used by
+ * the driver for read/write operation.
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
*/
struct qcom_nand_host {
+ struct qcom_nand_boot_partition *boot_partitions;
+
struct nand_chip chip;
struct list_head node;
+ int nr_boot_partitions;
+
int cs;
int cw_size;
int cw_data;
- bool use_ecc;
- bool bch_enabled;
int ecc_bytes_hw;
int spare_bytes;
int bbm_size;
- u8 status;
+
int last_command;
u32 cfg0, cfg1;
@@ -466,23 +507,30 @@ struct qcom_nand_host {
u32 ecc_bch_cfg;
u32 clrflashstatus;
u32 clrreadstatus;
+
+ u8 status;
+ bool codeword_fixup;
+ bool use_ecc;
+ bool bch_enabled;
};
/*
* This data type corresponds to the NAND controller properties which varies
* among different NAND controllers.
* @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
* @is_bam - whether NAND controller is using BAM
* @is_qpic - whether NAND CTRL is part of qpic IP
* @qpic_v2 - flag to indicate QPIC IP version 2
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
*/
struct qcom_nandc_props {
u32 ecc_modes;
+ u32 dev_cmd_reg_start;
bool is_bam;
bool is_qpic;
bool qpic_v2;
- u32 dev_cmd_reg_start;
+ bool use_codeword_fixup;
};
/* Frees the BAM transaction memory */
@@ -1701,7 +1749,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) * 4);
oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
@@ -1782,7 +1830,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) * 4);
oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
} else {
@@ -1940,7 +1988,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2037,6 +2085,69 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
return ret;
}
+static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
+{
+ struct qcom_nand_boot_partition *boot_partition;
+ u32 start, end;
+ int i;
+
+ /*
+ * Since most accesses will be to non-boot partitions like rootfs,
+ * optimize the page check by:
+ *
+ * 1. Checking whether the page lies beyond the last boot partition.
+ * 2. Otherwise walking the boot partitions backward from the last one.
+ */
+
+ /* First check the last boot partition */
+ boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ /* Page is after the last boot partition end. This is NOT a boot partition */
+ if (page > end)
+ return false;
+
+ /* Actually check if it's a boot partition */
+ if (page < end && page >= start)
+ return true;
+
+ /* Check the other boot partitions starting from the second-last partition */
+ for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
+ boot_partition = &host->boot_partitions[i];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ if (page < end && page >= start)
+ return true;
+ }
+
+ return false;
+}
+
+static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
+{
+ bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
+
+ /* Skip conf write if we are already in the correct mode */
+ if (codeword_fixup == host->codeword_fixup)
+ return;
+
+ host->codeword_fixup = codeword_fixup;
+
+ host->cw_data = codeword_fixup ? 512 : 516;
+ host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
+ host->bbm_size - host->cw_data;
+
+ host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
+ host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
+ host->cw_data << UD_SIZE_BYTES;
+
+ host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
+ host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
+ host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+}
+
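
The fixup rewrites only the data-size and spare-size bitfields of the cached cfg0 (and the BCH data-bytes field of ecc_bch_cfg), leaving every other bit untouched. The read-modify-write, reduced to a standalone example for the UD_SIZE_BYTES field (bits 18:9):

#include <stdint.h>
#include <stdio.h>

#define UD_SIZE_BYTES		9
#define UD_SIZE_BYTES_MASK	(((1u << 10) - 1) << UD_SIZE_BYTES)	/* GENMASK(18, 9) */

int main(void)
{
	uint32_t cfg0 = 516u << UD_SIZE_BYTES;	/* normal layout: 516 data bytes */

	cfg0 &= ~UD_SIZE_BYTES_MASK;	/* clear just this field */
	cfg0 |= 512u << UD_SIZE_BYTES;	/* boot-partition layout: 512 bytes */

	printf("%u\n", (cfg0 & UD_SIZE_BYTES_MASK) >> UD_SIZE_BYTES);	/* 512 */
	return 0;
}
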
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
@@ -2045,6 +2156,9 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *data_buf, *oob_buf = NULL;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -2064,6 +2178,9 @@ static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int cw, ret;
u8 *data_buf = buf, *oob_buf = chip->oob_poi;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
for (cw = 0; cw < ecc->steps; cw++) {
ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
page, cw);
@@ -2084,6 +2201,9 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2104,6 +2224,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
@@ -2119,7 +2242,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2176,6 +2299,9 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2194,7 +2320,7 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) << 2);
oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
@@ -2254,6 +2380,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
int data_size, oob_size;
int ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
host->use_ecc = true;
clear_bam_transaction(nandc);
@@ -2915,6 +3044,74 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
+static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_boot_partition *boot_partition;
+ struct device *dev = nandc->dev;
+ int partitions_count, i, j, ret;
+
+ if (!of_find_property(dn, "qcom,boot-partitions", NULL))
+ return 0;
+
+ partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
+ if (partitions_count <= 0) {
+ dev_err(dev, "Error parsing boot partition\n");
+ return partitions_count ? partitions_count : -EINVAL;
+ }
+
+ host->nr_boot_partitions = partitions_count / 2;
+ host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
+ sizeof(*host->boot_partitions), GFP_KERNEL);
+ if (!host->boot_partitions) {
+ host->nr_boot_partitions = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
+ boot_partition = &host->boot_partitions[i];
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
+ &boot_partition->page_offset);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_offset % mtd->writesize) {
+ dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert offset to nand pages */
+ boot_partition->page_offset /= mtd->writesize;
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
+ &boot_partition->page_size);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition size at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_size % mtd->writesize) {
+ dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert size to nand pages */
+ boot_partition->page_size /= mtd->writesize;
+ }
+
+ return 0;
+}
+
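
The property is consumed as flat <offset size> pairs in bytes; each value must be a multiple of the page size and is stored in pages. Under an assumed 2048-byte page and a made-up single-partition property, the conversion works out as follows:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical DT property: qcom,boot-partitions = <0x0 0xc80000>; */
	uint32_t cells[] = { 0x0, 0xc80000 };
	uint32_t writesize = 2048;	/* assumed NAND page size */

	for (unsigned int i = 0; i < 2; i += 2) {
		if (cells[i] % writesize || cells[i + 1] % writesize)
			return 1;	/* offsets/sizes must be page multiples */
		printf("page_offset=%u page_size=%u\n",
		       cells[i] / writesize, cells[i + 1] / writesize); /* 0, 6400 */
	}
	return 0;
}
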
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
@@ -2972,6 +3169,14 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
nand_cleanup(chip);
+ if (nandc->props->use_codeword_fixup) {
+ ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+ }
+
return ret;
}
@@ -3137,6 +3342,7 @@ static int qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
.is_bam = false,
+ .use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index ba24cb36d0b9..b2b42dd1a2de 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -52,7 +52,7 @@ static const struct mtd_ooblayout_ops oob_sm_ops = {
.free = oob_sm_ooblayout_free,
};
-/* NOTE: This layout is is not compatabable with SmartMedia, */
+/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have page dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index b36e5260ae27..e12f9f580a15 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1223,11 +1223,8 @@ static int tegra_nand_remove(struct platform_device *pdev)
struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
struct nand_chip *chip = ctrl->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 80dabe6ff0f3..b520fe634041 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs := core.o ato.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
new file mode 100644
index 000000000000..82b377c06812
--- /dev/null
+++ b/drivers/mtd/nand/spi/ato.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Aidan MacDonald
+ *
+ * Author: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_ATO 0x9b
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+
+static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+ return 0;
+}
+
+static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = (16 * section);
+ region->length = 8;
+ } else {
+ /* first byte of section 0 is reserved for the BBM */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
+ .ecc = ato25d1ga_ooblayout_ecc,
+ .free = ato25d1ga_ooblayout_free,
+};
+
+
+static const struct spinand_info ato_spinand_table[] = {
+ SPINAND_INFO("ATO25D1GA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer ato_spinand_manufacturer = {
+ .id = SPINAND_MFR_ATO,
+ .name = "ATO",
+ .chips = ato_spinand_table,
+ .nchips = ARRAY_SIZE(ato_spinand_table),
+ .ops = &ato_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d5b685d1605e..9d73910a7ae8 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -927,6 +927,7 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &ato_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 23763d16e4f9..b43df73927a0 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -186,3 +186,12 @@ config MTD_QCOMSMEM_PARTS
help
This provides support for parsing partitions from Shared Memory (SMEM)
for NAND and SPI flash on Qualcomm platforms.
+
+config MTD_SERCOMM_PARTS
+ tristate "Sercomm partition table parser"
+ depends on MTD && RALINK
+ help
+ This provides a partition table parser for devices with a Sercomm
+ partition map. This partition table contains real partition
+ offsets, which may differ from device to device depending on the
+ number and location of bad blocks on NAND.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 2e98aa048278..2fcf0ab9e7da 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SERCOMM_PARTS) += scpart.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o
diff --git a/drivers/mtd/parsers/ofpart_bcm4908.c b/drivers/mtd/parsers/ofpart_bcm4908.c
index 0eddef4c198e..bb072a0940e4 100644
--- a/drivers/mtd/parsers/ofpart_bcm4908.c
+++ b/drivers/mtd/parsers/ofpart_bcm4908.c
@@ -35,12 +35,15 @@ static long long bcm4908_partitions_fw_offset(void)
err = kstrtoul(s + len + 1, 0, &offset);
if (err) {
pr_err("failed to parse %s\n", s + len + 1);
+ of_node_put(root);
return err;
}
+ of_node_put(root);
return offset << 10;
}
+ of_node_put(root);
return -ENOENT;
}
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index feb44a573d44..a16b42a88581 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -58,6 +58,7 @@ static void parse_redboot_of(struct mtd_info *master)
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
if (ret)
return;
diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
new file mode 100644
index 000000000000..02601bb33de4
--- /dev/null
+++ b/drivers/mtd/parsers/scpart.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drivers/mtd/parsers/scpart.c: Sercomm Partition Parser
+ *
+ * Copyright (C) 2018 NOGUCHI Hiroshi
+ * Copyright (C) 2022 Mikhail Zhilkin
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+#define MOD_NAME "scpart"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) MOD_NAME ": " fmt
+
+#define ID_ALREADY_FOUND 0xffffffffUL
+
+#define MAP_OFFS_IN_BLK 0x800
+#define MAP_MIRROR_NUM 2
+
+static const char sc_part_magic[] = {
+ 'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0',
+};
+#define PART_MAGIC_LEN sizeof(sc_part_magic)
+
+/* assumes that all fields are stored in CPU native endianness */
+struct sc_part_desc {
+ uint32_t part_id;
+ uint32_t part_offs;
+ uint32_t part_bytes;
+};
+
+static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc)
+{
+ return ((pdesc->part_id != 0xffffffffUL) &&
+ (pdesc->part_offs != 0xffffffffUL) &&
+ (pdesc->part_bytes != 0xffffffffUL));
+}
+
+static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
+ struct sc_part_desc **ppdesc)
+{
+ int cnt = 0;
+ int res = 0;
+ int res2;
+ loff_t offs;
+ size_t retlen;
+ struct sc_part_desc *pdesc = NULL;
+ struct sc_part_desc *tmpdesc;
+ uint8_t *buf;
+
+ buf = kzalloc(master->erasesize, GFP_KERNEL);
+ if (!buf) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf);
+ if (res2 || retlen != master->erasesize) {
+ res = -EIO;
+ goto free;
+ }
+
+ for (offs = MAP_OFFS_IN_BLK;
+ offs < master->erasesize - sizeof(*tmpdesc);
+ offs += sizeof(*tmpdesc)) {
+ tmpdesc = (struct sc_part_desc *)&buf[offs];
+ if (!scpart_desc_is_valid(tmpdesc))
+ break;
+ cnt++;
+ }
+
+ if (cnt > 0) {
+ int bytes = cnt * sizeof(*pdesc);
+
+ pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc) {
+ res = -ENOMEM;
+ goto free;
+ }
+ memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes);
+
+ *ppdesc = pdesc;
+ res = cnt;
+ }
+
+free:
+ kfree(buf);
+
+out:
+ return res;
+}
+
+static int scpart_find_partmap(struct mtd_info *master,
+ struct sc_part_desc **ppdesc)
+{
+ int magic_found = 0;
+ int res = 0;
+ int res2;
+ loff_t offs = 0;
+ size_t retlen;
+ uint8_t rdbuf[PART_MAGIC_LEN];
+
+ while ((magic_found < MAP_MIRROR_NUM) &&
+ (offs < master->size) &&
+ !mtd_block_isbad(master, offs)) {
+ res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf);
+ if (res2 || retlen != PART_MAGIC_LEN) {
+ res = -EIO;
+ goto out;
+ }
+ if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) {
+ pr_debug("Signature found at 0x%llx\n", offs);
+ magic_found++;
+ res = scpart_scan_partmap(master, offs, ppdesc);
+ if (res > 0)
+ goto out;
+ }
+ offs += master->erasesize;
+ }
+
+out:
+ if (res > 0)
+ pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs);
+ else
+ pr_info("No valid 'SC PART MAP' was found\n");
+
+ return res;
+}
+
+static int scpart_parse(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ const char *partname;
+ int n;
+ int nr_scparts;
+ int nr_parts = 0;
+ int res = 0;
+ struct sc_part_desc *scpart_map = NULL;
+ struct mtd_partition *parts = NULL;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
+ struct device_node *pp;
+
+ mtd_node = mtd_get_of_node(master);
+ if (!mtd_node) {
+ res = -ENOENT;
+ goto out;
+ }
+
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ pr_info("%s: 'partitions' subnode not found on %pOF.\n",
+ master->name, mtd_node);
+ res = -ENOENT;
+ goto out;
+ }
+
+ nr_scparts = scpart_find_partmap(master, &scpart_map);
+ if (nr_scparts <= 0) {
+ pr_info("No any partitions was found in 'SC PART MAP'.\n");
+ res = -ENOENT;
+ goto free;
+ }
+
+ parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts),
+ GFP_KERNEL);
+ if (!parts) {
+ res = -ENOMEM;
+ goto free;
+ }
+
+ for_each_child_of_node(ofpart_node, pp) {
+ u32 scpart_id;
+
+ if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id))
+ continue;
+
+ for (n = 0 ; n < nr_scparts ; n++)
+ if ((scpart_map[n].part_id != ID_ALREADY_FOUND) &&
+ (scpart_id == scpart_map[n].part_id))
+ break;
+ if (n >= nr_scparts)
+ /* not match */
+ continue;
+
+ /* add the partition found in OF into MTD partition array */
+ parts[nr_parts].offset = scpart_map[n].part_offs;
+ parts[nr_parts].size = scpart_map[n].part_bytes;
+ parts[nr_parts].of_node = pp;
+
+ if (!of_property_read_string(pp, "label", &partname))
+ parts[nr_parts].name = partname;
+ if (of_property_read_bool(pp, "read-only"))
+ parts[nr_parts].mask_flags |= MTD_WRITEABLE;
+ if (of_property_read_bool(pp, "lock"))
+ parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK;
+
+ /* mark as 'done' */
+ scpart_map[n].part_id = ID_ALREADY_FOUND;
+
+ nr_parts++;
+ }
+
+ if (nr_parts > 0) {
+ *pparts = parts;
+ res = nr_parts;
+ } else
+ pr_info("No partition in OF matches partition ID with 'SC PART MAP'.\n");
+
+ of_node_put(pp);
+
+free:
+ of_node_put(ofpart_node);
+ kfree(scpart_map);
+ if (res <= 0)
+ kfree(parts);
+
+out:
+ return res;
+}
+
+static const struct of_device_id scpart_parser_of_match_table[] = {
+ { .compatible = "sercomm,sc-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table);
+
+static struct mtd_part_parser scpart_parser = {
+ .parse_fn = scpart_parse,
+ .name = "scpart",
+ .of_match_table = scpart_parser_of_match_table,
+};
+module_mtd_part_parser(scpart_parser);
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("scpart");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NOGUCHI Hiroshi <drvlabo@gmail.com>");
+MODULE_AUTHOR("Mikhail Zhilkin <csharper2005@gmail.com>");
+MODULE_DESCRIPTION("Sercomm partition parser");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 0cff2cda1b5a..7f955fade838 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1111,9 +1111,9 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- mutex_lock(&ftl->mutex);
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
+ mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
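
The reordering matters because the flush work itself takes ftl->mutex: waiting in cancel_work_sync() for a queued flush while already holding the mutex is a self-deadlock. A reduced sketch of the hazard, with hypothetical names:

/* The work handler takes the same lock the old code held while cancelling: */
static void flush_work_fn(struct work_struct *work)
{
	struct ftl *ftl = container_of(work, struct ftl, flush_work);

	mutex_lock(&ftl->mutex);	/* blocks forever if release() holds it */
	/* ... flush the cache ... */
	mutex_unlock(&ftl->mutex);
}

/* Fixed order, as in the hunk above: cancel the timer and work first,
 * then take the mutex for the final flush. */
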
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 94a969185ceb..5070d72835ec 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -237,7 +237,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
reg = readl(host->regbase + FMC_CFG);
reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
reg |= FMC_CFG_OP_MODE_NORMAL;
- reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES
: SPI_NOR_ADDR_MODE_3BYTES;
writel(reg, host->regbase + FMC_CFG);
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9032b9ab2eaf..ab3990e6ac25 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -203,7 +203,7 @@ static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->program_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
for (i = 0; i < len; i++)
@@ -230,7 +230,7 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->erase_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
return nxp_spifi_wait_for_cmd(spifi);
@@ -252,12 +252,12 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
}
/* Memory mode supports address length between 1 and 4 */
- if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4)
return -EINVAL;
spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
return 0;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 502967c76c5f..f2c64006f8d7 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -38,7 +38,7 @@
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-#define SPI_NOR_MAX_ADDR_WIDTH 4
+#define SPI_NOR_MAX_ADDR_NBYTES 4
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
@@ -177,7 +177,7 @@ int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
- if (spi_nor_protocol_is_dtr(nor->write_proto))
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->erase(nor, offs);
@@ -198,7 +198,7 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(len, buf, 0));
bool usebouncebuf;
@@ -262,7 +262,7 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
@@ -972,7 +972,7 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
@@ -1113,9 +1113,9 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
- nor->addr_width, addr);
+ nor->addr_nbytes, addr);
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
@@ -1126,13 +1126,13 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
* Default implementation, if driver doesn't have a specialized HW
* control
*/
- for (i = nor->addr_width - 1; i >= 0; i--) {
+ for (i = nor->addr_nbytes - 1; i >= 0; i--) {
nor->bouncebuf[i] = addr & 0xff;
addr >>= 8;
}
return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
+ nor->bouncebuf, nor->addr_nbytes);
}
/**
@@ -2249,43 +2249,43 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
-static int spi_nor_set_addr_width(struct spi_nor *nor)
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
- if (nor->addr_width) {
- /* already configured from SFDP */
+ if (nor->params->addr_nbytes) {
+ nor->addr_nbytes = nor->params->addr_nbytes;
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd address width cannot be used because
+ * in this protocol an odd addr_nbytes cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle and so too the data
* phase, and we will end the transaction with half a cycle left
* over.
*
- * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
* avoid this situation.
*/
- nor->addr_width = 4;
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
} else {
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
}
- if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+ nor->addr_nbytes = 4;
}
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
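
After the rename the selection order is unchanged: an SFDP-provided value wins, 8D-8D-8D parts are forced to 4, then the static flash_info value applies, with 3 as the default, bumped to 4 for parts larger than 16 MiB. The precedence, condensed into an illustrative helper (not kernel code):

#include <stdint.h>

uint8_t pick_addr_nbytes(uint8_t sfdp_nbytes, int is_8d_8d_8d,
			 uint8_t info_nbytes, uint64_t flash_size)
{
	uint8_t n;

	if (sfdp_nbytes)
		n = sfdp_nbytes;	/* SFDP wins */
	else if (is_8d_8d_8d)
		n = 4;	/* octal DTR cannot use an odd address phase */
	else if (info_nbytes)
		n = info_nbytes;	/* static flash_info entry */
	else
		n = 3;	/* historical default */

	if (n == 3 && flash_size > 0x1000000)
		n = 4;	/* >16 MiB needs 4-byte addressing */
	return n;
}
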
@@ -2304,7 +2304,7 @@ static int spi_nor_setup(struct spi_nor *nor,
if (ret)
return ret;
- return spi_nor_set_addr_width(nor);
+ return spi_nor_set_addr_nbytes(nor);
}
/**
@@ -2382,12 +2382,7 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
- if (no_sfdp_flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (no_sfdp_flags & SECT_4K) {
+ if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
@@ -2497,7 +2492,6 @@ static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
- nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
@@ -2718,7 +2712,7 @@ static int spi_nor_init(struct spi_nor *nor)
nor->flags & SNOR_F_SWP_IS_VOLATILE))
spi_nor_try_unlock_all(nor);
- if (nor->addr_width == 4 &&
+ if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
!(nor->flags & SNOR_F_4B_OPCODES)) {
/*
@@ -2730,7 +2724,7 @@ static int spi_nor_init(struct spi_nor *nor)
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
- nor->params->set_4byte_addr_mode(nor, true);
+ return nor->params->set_4byte_addr_mode(nor, true);
}
return 0;
@@ -2845,7 +2839,7 @@ static void spi_nor_put_device(struct mtd_info *mtd)
void spi_nor_restore(struct spi_nor *nor)
{
/* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
@@ -2989,7 +2983,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
- * - set the address width.
+ * - set the number of address bytes.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
@@ -3030,7 +3024,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
@@ -3061,7 +3055,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 3f841ec36e56..85b0cf254e97 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -84,9 +84,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_width, addr) \
+#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
- SPI_MEM_OP_ADDR(addr_width, addr, 0), \
+ SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -340,6 +340,11 @@ struct spi_nor_otp {
* @writesize Minimal writable flash unit size. Defaults to 1. Set to
* ECC unit size for ECC-ed flashes.
* @page_size: the page size of the SPI NOR flash memory.
+ * @addr_nbytes: number of address bytes to send.
+ * @addr_mode_nbytes: number of address bytes of current address mode. Useful
+ * when the flash operates with 4B opcodes but needs the
+ * internal address mode for opcodes that don't have a 4B
+ * opcode correspondent.
* @rdsr_dummy: dummy cycles needed for Read Status Register command
* in octal DTR mode.
* @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
@@ -372,6 +377,8 @@ struct spi_nor_flash_parameter {
u64 size;
u32 writesize;
u32 page_size;
+ u8 addr_nbytes;
+ u8 addr_mode_nbytes;
u8 rdsr_dummy;
u8 rdsr_addr_nbytes;
@@ -429,7 +436,7 @@ struct spi_nor_fixups {
* isn't necessarily called a "sector" by the vendor.
* @n_sectors: the number of sectors.
* @page_size: the flash's page size.
- * @addr_width: the flash's address width.
+ * @addr_nbytes: number of address bytes to send.
*
* @parse_sfdp: true when flash supports SFDP tables. The false value has no
* meaning. If one wants to skip the SFDP tables, one should
@@ -457,7 +464,6 @@ struct spi_nor_fixups {
* flags are used together with the SPI_NOR_SKIP_SFDP flag.
* SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
* SECT_4K: SPINOR_OP_BE_4K works uniformly.
- * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
* SPI_NOR_DUAL_READ: flash supports Dual Read.
* SPI_NOR_QUAD_READ: flash supports Quad Read.
* SPI_NOR_OCTAL_READ: flash supports Octal Read.
@@ -488,7 +494,7 @@ struct flash_info {
unsigned sector_size;
u16 n_sectors;
u16 page_size;
- u16 addr_width;
+ u8 addr_nbytes;
bool parse_sfdp;
u16 flags;
@@ -505,7 +511,6 @@ struct flash_info {
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
#define SECT_4K BIT(1)
-#define SECT_4K_PMC BIT(2)
#define SPI_NOR_DUAL_READ BIT(3)
#define SPI_NOR_QUAD_READ BIT(4)
#define SPI_NOR_OCTAL_READ BIT(5)
@@ -550,11 +555,11 @@ struct flash_info {
.n_sectors = (_n_sectors), \
.page_size = 256, \
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = (_addr_width), \
+ .addr_nbytes = (_addr_nbytes), \
.flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define OTP_INFO(_len, _n_regions, _base, _offset) \
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index eaf84f7a0676..df76cb5de3f9 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -86,7 +86,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
seq_printf(s, "size\t\t%s\n", buf);
seq_printf(s, "write size\t%u\n", params->writesize);
seq_printf(s, "page size\t%u\n", params->page_size);
- seq_printf(s, "address width\t%u\n", nor->addr_width);
+ seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
seq_puts(s, "flags\t\t");
spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names));
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index 79e2408f4998..fcc3b0e7cda9 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -13,7 +13,7 @@ static const struct flash_info esmt_nor_parts[] = {
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
+ { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index c012bc2486e1..89a66a19d754 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -14,13 +14,13 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_bfpt *bfpt)
{
/*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY.
+ * Overwrite the number of address bytes advertised by the BFPT.
*/
if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
+ nor->params->addr_nbytes = 4;
return 0;
}
@@ -29,6 +29,21 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
+static void pm25lv_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ int i;
+
+ /* The PM25LV series has a different 4k sector erase opcode */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (map->erase_type[i].size == 4096)
+ map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+}
+
+static const struct spi_nor_fixups pm25lv_nor_fixups = {
+ .late_init = pm25lv_nor_late_init,
+};
+
static const struct flash_info issi_nor_parts[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
@@ -62,9 +77,13 @@ static const struct flash_info issi_nor_parts[] = {
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index a96f74e0f568..3c9681a3f7a3 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -399,8 +399,16 @@ static int micron_st_nor_ready(struct spi_nor *nor)
return sr_ready;
ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * Some controllers, such as Intel SPI, do not support low
+ * level operations such as reading the flag status
+ * register. They only expose a small set of high-level
+ * operations to the software. If this is the case, we use
+ * only the status register value.
+ */
+ return ret == -EOPNOTSUPP ? sr_ready : ret;
+ }
if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
if (nor->bouncebuf[0] & FSR_E_ERR)
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index fa63d8571218..00ab0d2d6d2f 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -35,13 +35,13 @@
*/
int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
struct spi_mem_dirmap_desc *rdesc;
enum spi_nor_protocol read_proto;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_proto = nor->read_proto;
rdesc = nor->dirmap.rdesc;
@@ -54,7 +54,7 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
ret = spi_nor_read_data(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_proto = read_proto;
nor->dirmap.rdesc = rdesc;
@@ -85,11 +85,11 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
{
enum spi_nor_protocol write_proto;
struct spi_mem_dirmap_desc *wdesc;
- u8 addr_width, program_opcode;
+ u8 addr_nbytes, program_opcode;
int ret, written;
program_opcode = nor->program_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
write_proto = nor->write_proto;
wdesc = nor->dirmap.wdesc;
@@ -113,7 +113,7 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
out:
nor->program_opcode = program_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->write_proto = write_proto;
nor->dirmap.wdesc = wdesc;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index a5211543d30d..2257f1b4c2e2 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -134,7 +134,7 @@ struct sfdp_4bait {
/**
* spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
- * addr_width and read_dummy members of the struct spi_nor
+ * addr_nbytes and read_dummy members of the struct spi_nor
 * should be previously set.
* @nor: pointer to a 'struct spi_nor'
@@ -178,21 +178,21 @@ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
size_t len, void *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
nor->read_dummy = 8;
ret = spi_nor_read_raw(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
return ret;
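
The values forced here are the JESD216 defaults for the Read SFDP command: opcode 0x5a (SPINOR_OP_RDSFDP), a 3-byte address, and 8 dummy cycles. As a sketch, the equivalent raw spi-mem operation on a single-bit bus would be:

	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(8, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 1));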
@@ -462,11 +462,13 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
- nor->addr_width = 3;
+ params->addr_nbytes = 3;
+ params->addr_mode_nbytes = 3;
break;
case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
+ params->addr_mode_nbytes = 4;
break;
default:
@@ -637,12 +639,12 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
/**
- * spi_nor_smpt_addr_width() - return the address width used in the
+ * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the
* configuration detection command.
* @nor: pointer to a 'struct spi_nor'
* @settings: configuration detection command descriptor, dword1
*/
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings)
{
switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
case SMPT_CMD_ADDRESS_LEN_0:
@@ -653,7 +655,7 @@ static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
return 4;
case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
default:
- return nor->addr_width;
+ return nor->params->addr_mode_nbytes;
}
}
@@ -690,7 +692,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u32 addr;
int err;
u8 i;
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
@@ -698,7 +700,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
if (!buf)
return ERR_PTR(-ENOMEM);
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
@@ -709,7 +711,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
@@ -756,7 +758,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
/* fall through */
out:
kfree(buf);
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
return ret;
@@ -1044,7 +1046,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
/*
* We need at least one 4-byte op code per read, program and erase
* operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
+ * nor->addr_nbytes value.
*/
if (!read_hwcaps || !pp_hwcaps || !erase_mask)
goto out;
@@ -1098,7 +1100,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
* Spansion memory. However this quirk is no longer needed with new
* SFDP compliant memories.
*/
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
/* fall through */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 43cd6cd92537..0150049007be 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -14,6 +14,8 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_CFR1V 0x00800002
+#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR3V 0x00800004
@@ -114,6 +116,150 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
}
/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ * register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Volatile registers should be updated in the field application, since a
+ * power interruption during a non-volatile register write risks corrupting
+ * the register. This function sets the Quad Enable bit in the volatile CFR1
+ * register. If users set the Quad Enable bit in the non-volatile CFR1 in
+ * advance (typically with a Flash programmer before mounting the Flash on
+ * the PCB), the Quad Enable bit in the volatile CFR1 is also set during
+ * Flash power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ u8 cfr1v_written;
+ int ret;
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1,
+ nor->bouncebuf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ cfr1v_written = nor->bouncebuf[0];
+
+ /* Read back and check it. */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != cfr1v_written) {
+ dev_err(nor->dev, "CFR1: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * cypress_nor_set_page_size() - Set page size which corresponds to the flash
+ * configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on the part,
+ * but the page size is actually configurable (with the default being 256B).
+ * Read CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_page_size(struct spi_nor *nor)
+{
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
+ nor->bouncebuf);
+ int ret;
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ nor->params->page_size = 512;
+ else
+ nor->params->page_size = 256;
+
+ return 0;
+}
+
+static int
+s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /* Replace Quad Enable with volatile version */
+ nor->params->quad_enable = cypress_nor_quad_enable_volatile;
+
+ return cypress_nor_set_page_size(nor);
+}
+
+static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_erase_type *erase_type =
+ nor->params->erase_map.erase_type;
+ unsigned int i;
+
+ /*
+ * In some parts, 3-byte erase opcodes are advertised by 4BAIT.
+ * Convert them to 4-byte erase opcodes.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ switch (erase_type[i].opcode) {
+ case SPINOR_OP_SE:
+ erase_type[i].opcode = SPINOR_OP_SE_4B;
+ break;
+ case SPINOR_OP_BE_4K:
+ erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void s25hx_t_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /* Fast Read 4B requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+
+ /* The writesize should be the ECC data unit size (16 bytes) */
+ params->writesize = 16;
+}
+
+static struct spi_nor_fixups s25hx_t_fixups = {
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+ .late_init = s25hx_t_late_init,
+};
+
+/**
* cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
@@ -167,28 +313,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- /*
- * The BFPT table advertises a 512B page size but the page size is
- * actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- */
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
- nor->bouncebuf);
- int ret;
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
+ return cypress_nor_set_page_size(nor);
}
static const struct spi_nor_fixups s28hs512t_fixups = {
@@ -310,6 +435,22 @@ static const struct flash_info spansion_nor_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
{ "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 1d2f5db047bd..5723157739fc 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -31,7 +31,7 @@
.sector_size = (8 * (_page_size)), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = 3, \
+ .addr_nbytes = 3, \
.flags = SPI_NOR_NO_FR
/* Xilinx S3AN share MFR with Atmel SPI NOR */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d7fb33c078e8..e58a1e0cadd2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -84,11 +84,13 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -2001,36 +2003,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
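
With AD_TIMER_INTERVAL defined as 100 (ms) in bond_3ad.h, the new compile-time constant evaluates to the same value every caller used to pass in:

	/* 1000 / AD_TIMER_INTERVAL == 1000 / 100 == 10 ticks per second */
	static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;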
/**
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 007d43e46dcb..b9dbad3a8af8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -653,6 +653,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
@@ -665,6 +666,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
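+	/* If the sender IP belongs to a local bridge master, return NULL so
+	 * this ARP is not RLB-balanced and takes the regular transmit path.
+	 */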
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e75acb14d066..86d42306aa5e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -2001,6 +2000,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
+ new_slave->last_tx = new_slave->last_rx;
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2079,7 +2080,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
	/* Initialize AD; can be called only after the mac address of the
	 * bond is set.
	 */
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2164,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2445,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -2884,8 +2884,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
return;
}
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
arp_xmit(skb);
+ }
+
return;
}
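
The slave_last_tx()/slave_update_last_tx() helpers used from here on are not part of this diff; a plausible minimal form, assuming a last_tx jiffies field on struct slave in include/net/bonding.h, is:

	static inline void slave_update_last_tx(struct slave *slave)
	{
		WRITE_ONCE(slave->last_tx, jiffies);
	}

	static inline unsigned long slave_last_tx(struct slave *slave)
	{
		return READ_ONCE(slave->last_tx);
	}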
@@ -3074,8 +3077,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
@@ -3103,8 +3105,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
}
addrconf_addr_solict_mult(daddr, &mcaddr);
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
ndisc_send_skb(skb, &mcaddr, saddr);
+ }
}
static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
@@ -3161,6 +3165,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
found:
if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
dst_release(dst);
kfree(tags);
}
@@ -3192,12 +3199,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
return ret;
}
-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
struct in6_addr *saddr, struct in6_addr *daddr)
{
int i;
- if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+	/* Ignore NAs whose:
+	 * 1. source address is the unspecified address, or
+	 * 2. destination address is neither the all-nodes multicast address
+	 *    nor an address configured on the bond interface.
+	 */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
__func__, saddr, daddr);
return;
@@ -3240,15 +3254,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
* see bond_arp_rcv().
*/
if (bond_is_active_slave(slave))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+ bond_validate_na(bond, slave, saddr, daddr);
out:
return RX_HANDLER_ANOTHER;
@@ -3335,12 +3348,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3365,7 +3378,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
@@ -3431,7 +3444,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3482,9 +3495,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
@@ -3501,8 +3514,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3511,10 +3524,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -4169,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -4206,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4217,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -4224,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
@@ -5333,8 +5369,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
@@ -6207,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
-
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index f857968efed7..ccb438eca517 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 65ba6697bd7d..c469b2f3e57d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -63,7 +63,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index e750d13c8841..c320de474f40 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1070,9 +1070,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1082,6 +1079,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1092,6 +1101,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d1e1a459c045..d31191686a54 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -195,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index baf749c8cda3..c1ff3c046d62 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
dm->mode = cpu_to_le32(GS_CAN_MODE_START);
dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
@@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev)
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
kfree(dm);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
kfree(dm);
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -925,17 +925,21 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
}
/* blink LEDs to help find this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -1072,9 +1076,10 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
kfree(bt_const);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 4b14d80d27ed..e4f446db0ca1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -613,6 +613,9 @@ int ksz9477_fdb_dump(struct ksz_device *dev, int port,
goto exit;
}
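+		/* skip entries that are not valid */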
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index ed7d137cba99..872aba63e7d4 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -170,6 +170,13 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.exit = ksz8_switch_exit,
};
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
@@ -196,6 +203,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
@@ -230,6 +238,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
@@ -803,9 +812,15 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->supports_rgmii[port])
phy_interface_set_rgmii(config->supported_interfaces);
- if (dev->info->internal_phy[port])
+ if (dev->info->internal_phy[port]) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
@@ -962,6 +977,7 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1001,6 +1017,14 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ /* Start with learning disabled on standalone user ports, and enabled
+	 * on the CPU port. In the absence of finer-grained mechanisms,
+	 * learning on the CPU port will, in some cases, avoid flooding
+	 * bridge-local addresses onto the network.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
@@ -1277,6 +1301,8 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ p = &dev->ports[port];
+
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
@@ -1286,9 +1312,13 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
@@ -1300,12 +1330,38 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
- p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -1609,13 +1665,13 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct ksz_device *dev = ds->priv;
struct ksz_port *p;
p = &dev->ports[port];
@@ -1629,6 +1685,15 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
ksz_port_set_xmii_speed(dev, port, speed);
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
if (dev->dev_ops->phylink_mac_link_up)
dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
@@ -1719,6 +1784,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
.port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz_port_vlan_filtering,
.port_vlan_add = ksz_port_vlan_add,
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 764ada3a0f42..0d9520dc6d2d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -65,6 +65,7 @@ struct ksz_chip_data {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index daedd2bf20c1..5579644e8fde 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -244,10 +244,6 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
- /* disable frame check length field */
- lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
- false);
-
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..83dca9179aa0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 859196898a7d..aadb0bd7c24f 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -610,6 +610,9 @@ static int felix_change_tag_protocol(struct dsa_switch *ds,
old_proto_ops = felix->tag_proto_ops;
+ if (proto_ops == old_proto_ops)
+ return 0;
+
err = proto_ops->setup(ds);
if (err)
goto setup_failed;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 61ed317602e7..f8f19a85744c 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -22,6 +22,7 @@
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
@@ -274,27 +275,98 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -547,100 +619,379 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -1128,6 +1479,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot for which egress scheduling is blocked, but we still need to keep 33 ns
+ * available for one packet to be transmitted; otherwise the port tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
+
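For intuition, here is a minimal userspace sketch of the arithmetic above. The 33 ns constant and the ps-per-ns factor are assumptions mirroring VSC9959_TAS_MIN_GATE_LEN_NS and PSEC_PER_NSEC; the caller must pass either U64_MAX or a gate length of at least 33 ns, otherwise the subtraction would wrap.

#include <stdint.h>
#include <stdio.h>

#define MIN_GATE_LEN_NS	33ULL	/* assumed VSC9959_TAS_MIN_GATE_LEN_NS */
#define PSEC_PER_NSEC	1000ULL

static uint64_t remaining_gate_len_ps(uint64_t gate_len_ns)
{
	if (gate_len_ns == UINT64_MAX)	/* gate always open */
		return UINT64_MAX;
	return (gate_len_ns - MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
}

int main(void)
{
	/* a 10000 ns gate leaves 9967000 ps for the guard band math */
	printf("%llu\n", (unsigned long long)remaining_gate_len_ps(10000));
	return 0;
}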
/* Extract shortest continuous gate open intervals in ns for each traffic class
* of a cyclic tc-taprio schedule. If a gate is always open, the duration is
* considered U64_MAX. If the gate is always closed, it is considered 0.
@@ -1137,6 +1505,7 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
{
struct tc_taprio_sched_entry *entry;
u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
int tc, i, n;
/* Initialize arrays */
@@ -1164,16 +1533,87 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
if (entry->gate_mask & BIT(tc)) {
gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
} else {
/* Gate closes now, record a potential new
* minimum and reinitialize length
*/
- if (min_gate_len[tc] > gate_len[tc])
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
min_gate_len[tc] = gate_len[tc];
gate_len[tc] = 0;
}
}
}
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
+}
+
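A self-contained toy of the scan above shows how the three cases separate on a hypothetical two-entry schedule: a gate open in every entry stays at U64_MAX (always open), a gate that closes records its shortest nonzero open run, and a gate that never opens is rewritten to 0 after the loop.

#include <stdint.h>
#include <stdio.h>

#define NUM_TC 8

struct entry { uint8_t gate_mask; uint64_t interval_ns; };

int main(void)
{
	/* hypothetical schedule: tc0 open in both entries, tc1 only in the first */
	struct entry sched[2] = { { 0x03, 20000 }, { 0x01, 30000 } };
	uint64_t gate_len[NUM_TC] = { 0 }, min_gate_len[NUM_TC];
	uint8_t ever_opened = 0;
	int tc, i;

	for (tc = 0; tc < NUM_TC; tc++)
		min_gate_len[tc] = UINT64_MAX;

	for (i = 0; i < 2; i++) {
		for (tc = 0; tc < NUM_TC; tc++) {
			if (sched[i].gate_mask & (1u << tc)) {
				gate_len[tc] += sched[i].interval_ns;
				ever_opened |= 1u << tc;
			} else {
				if (min_gate_len[tc] > gate_len[tc] && gate_len[tc])
					min_gate_len[tc] = gate_len[tc];
				gate_len[tc] = 0;
			}
		}
	}

	/* permanently closed gates still read U64_MAX; overwrite with 0 */
	for (tc = 0; tc < NUM_TC; tc++)
		if (!(ever_opened & (1u << tc)))
			min_gate_len[tc] = 0;

	/* prints: tc0 always open, tc1 min 20000, tc2 closed */
	printf("tc0=%llu tc1=%llu tc2=%llu\n",
	       (unsigned long long)min_gate_len[0],
	       (unsigned long long)min_gate_len[1],
	       (unsigned long long)min_gate_len[2]);
	return 0;
}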
+/* ocelot_write_rix is a macro that concatenates QSYS_QMAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
}
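The switch statements above are unavoidable because of token pasting: ocelot_write_rix() glues the register name to an _RSZ suffix at preprocessing time, so the register must be a literal identifier, never a runtime variable. A toy model of the constraint (register names and strides are made up):

#include <stdio.h>

#define REG_A_RSZ 4
#define REG_B_RSZ 8
/* like ocelot_write_rix(): pastes the name with _RSZ at compile time */
#define write_rix(reg) printf(#reg " stride %d\n", reg##_RSZ)

int main(void)
{
	write_rix(REG_A);	/* expands to REG_A_RSZ */
	write_rix(REG_B);
	/* write_rix(some_variable) would not compile: no some_variable_RSZ */
	return 0;
}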
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
@@ -1232,11 +1672,16 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+ mutex_lock(&ocelot->fwd_domain_lock);
+
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u64 remaining_gate_len_ps;
u32 max_sdu;
- if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
- min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
/* Setting QMAXSDU_CFG to 0 disables oversized frame
* dropping.
*/
@@ -1249,9 +1694,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
/* If traffic class doesn't support a full MTU sized
* frame, make sure to enable oversize frame dropping
* for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
*/
- max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
- picos_per_byte);
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
* special case where all packets are oversized.
* Any limit smaller than 64 octets accomplishes this
@@ -1274,47 +1725,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
max_sdu);
}
- /* ocelot_write_rix is a macro that concatenates
- * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
- * the writes to each traffic class
- */
- switch (tc) {
- case 0:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
- port);
- break;
- case 1:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
- port);
- break;
- case 2:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
- port);
- break;
- case 3:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
- port);
- break;
- case 4:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
- port);
- break;
- case 5:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
- port);
- break;
- case 6:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
- port);
- break;
- case 7:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
- port);
- break;
- }
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
}
ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
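As a worked example of the max_sdu division above, assume a hypothetical 8000 ps per byte (roughly 1 Gbps) and about 2 us of remaining gate time:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t picos_per_byte = 8000;		/* assumed: ~1 Gbps */
	uint64_t remaining_gate_len_ps = 2000000;
	uint64_t max_sdu = remaining_gate_len_ps / picos_per_byte;

	/* 250 octets: a larger frame could not fully egress within the gate */
	printf("max_sdu = %llu octets\n", (unsigned long long)max_sdu);
	return 0;
}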
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1341,13 +1759,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->tas_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
- mutex_lock(&ocelot->tas_lock);
-
if (ocelot_port->taprio)
vsc9959_tas_guard_bands_update(ocelot, port);
@@ -2153,7 +2571,7 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
@@ -2170,7 +2588,7 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
@@ -2407,7 +2825,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
- int port, other_port;
+ int tc, port, other_port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -2451,19 +2869,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
min_speed = other_ocelot_port->speed;
}
- /* Enable cut-through forwarding for all traffic classes. */
- if (ocelot_port->speed == min_speed)
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode.
+ */
+ if (ocelot_port->speed == min_speed) {
val = GENMASK(7, 0);
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
set:
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
if (tmp == val)
continue;
dev_dbg(ocelot->dev,
- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
port, mask, ocelot_port->speed, min_speed,
- val ? "enabling" : "disabling");
+ val ? "enabling" : "disabling", val);
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
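A sketch of how the cut-through mask is built above: start with all eight classes enabled, then clear any class whose QMAXSDU register is nonzero, because the oversize length check would be bypassed in cut-through mode. The table contents are invented:

#include <stdint.h>
#include <stdio.h>

#define NUM_TC 8

/* toy QMAXSDU table: nonzero means oversize dropping is active on that tc */
static const uint32_t qmaxsdu[NUM_TC] = { 0, 0, 1518, 0, 0, 256, 0, 0 };

int main(void)
{
	uint8_t val = 0xff;	/* cut-through on all traffic classes */
	int tc;

	for (tc = 0; tc < NUM_TC; tc++)
		if (qmaxsdu[tc])
			val &= ~(1u << tc);	/* fall back to store-and-forward */

	printf("cut-through tc mask 0x%02x\n", val);	/* 0xdb */
	return 0;
}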
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ea0649211356..b34f4cdfe814 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -543,101 +614,379 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 1d3e7782a71f..c181346388a4 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1889,9 +1889,9 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
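The qca8k hunk above is a use-before-initialization fix: of_device_get_match_data() was called on priv->dev before that field was assigned, so it dereferenced a pointer still NULL from allocation. A minimal illustration of the ordering (all names hypothetical):

#include <stdio.h>

struct dev { const char *match_data; };

/* stand-in for of_device_get_match_data(): a NULL device yields no data */
static const char *get_match_data(const struct dev *d)
{
	return d ? d->match_data : NULL;
}

int main(void)
{
	struct dev parent = { .match_data = "qca8337" };
	struct dev *pdev = NULL;	/* like a freshly kzalloc'd field */
	const char *info;

	info = get_match_data(pdev);	/* old order: pdev is still NULL */
	printf("before assignment: %s\n", info ? info : "(null)");

	pdev = &parent;			/* fixed order: assign first */
	info = get_match_data(pdev);
	printf("after assignment:  %s\n", info);
	return 0;
}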
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
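The sja1105 one-liner fixes a classic post- versus pre-decrement bug on the unwind path. With i regions created before the failure, the old form executes the body one extra time with the index already at -1:

#include <stdio.h>

int main(void)
{
	int i = 3;	/* regions 0..2 were created before the failure */

	/* "while (i-- >= 0)" would run the body with i = 2, 1, 0, -1,
	 * touching regions[-1] on the last pass. The fixed form stops
	 * after index 0.
	 */
	while (--i >= 0)
		printf("destroy region %d\n", i);	/* prints 2, 1, 0 */
	return 0;
}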
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 3887ed33c5fe..fa622639d640 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -109,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -138,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
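The xrs700x counters are written here and also touched from interrupt context; on 32-bit machines u64_stats_sync is a sequence counter rather than an atomic 64-bit read, so the writer must use the _irqsave variants to avoid being interrupted mid-update. A toy, single-threaded model of just the retry handshake (the real API is u64_stats_sync; this only sketches the protocol):

#include <stdint.h>
#include <stdio.h>

static unsigned int seq;	/* odd while an update is in progress */
static uint64_t counter;

static void writer_update(uint64_t v)
{
	seq++;		/* mark update in progress */
	counter = v;
	seq++;		/* mark stable again */
}

static uint64_t reader_snapshot(void)
{
	unsigned int start;
	uint64_t v;

	do {
		start = seq;
		v = counter;
	} while (seq != start || (start & 1));	/* retry on torn read */
	return v;
}

int main(void)
{
	writer_update(42);
	printf("%llu\n", (unsigned long long)reader_snapshot());
	return 0;
}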
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 88595863d8bc..8a0af371e7dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -94,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev)
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e11cc29d3264..06508eebb585 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -1014,7 +1012,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -1064,11 +1061,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1382,7 +1379,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1400,9 +1396,8 @@ int aq_nic_stop(struct aq_nic_s *self)
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 2dfc1e32bbb3..93580484a3f4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7071604f9984..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ba0f1ffac507..96da0ba3d507 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -659,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -668,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -688,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
+ skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -698,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
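The bnxt change swaps the compl_deferred flag for a common ownership idiom: once the PTP worker takes the skb, the local pointer is set to NULL so the shared free at the bottom of the loop turns into a no-op (the free path here tolerates NULL). A userspace analogue:

#include <stdlib.h>

struct pkt { char *buf; };

/* hypothetical stand-in for handing the buffer to another worker */
static void take_ownership(char *buf)
{
	free(buf);	/* the real worker would free it later */
}

int main(void)
{
	struct pkt p = { .buf = malloc(64) };

	if (p.buf) {			/* some condition defers completion */
		take_ownership(p.buf);
		p.buf = NULL;		/* disarm the common free below */
	}

	free(p.buf);			/* free(NULL) is a defined no-op */
	return 0;
}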
@@ -11178,10 +11178,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
- features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
-
- if (!(bp->flags & BNXT_FLAG_TPA))
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 075c6206325c..b1b17f911300 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2130,6 +2130,7 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
u8 ptp_all_rx_tstamp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 14df8cfc2946..a36803e79e92 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,7 +21,6 @@
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
#include "bnxt_nvm_defs.h"
-#include "bnxt_ethtool.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -1307,6 +1306,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 7f3c0875b6f5..8e316367f6ce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 730febd19330..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f53387ed0167..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
- xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
}
@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c888ddee1fc4..7ded559842e8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -393,6 +393,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index db1e9d810b41..89889d8150da 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 26433a62d7f0..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd[3];
+ __be32 rsvd;
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index bfee0e4e54b1..da9973b711f4 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1932,6 +1932,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
@@ -1945,7 +1946,12 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9e6de2f968fa..6dae768671e3 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index cb069a0af7b9..a5f7152a1716 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -340,14 +340,14 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
return 0;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
int i;
for (i = 0; i < count; i++) {
- entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+ entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
if (i == 0)
@@ -395,7 +395,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
retval = tsnep_tx_map(skb, tx, count);
if (retval != 0) {
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -464,7 +464,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -1282,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, tsnep_of_match);
static struct platform_driver tsnep_driver = {
.driver = {
.name = TSNEP,
- .of_match_table = of_match_ptr(tsnep_of_match),
+ .of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
.remove = tsnep_remove,
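The tsnep fix gives the unmap helper an explicit start index: on the transmit error path the freshly mapped entries begin at tx->write, not tx->read, so unmapping from the read pointer released the wrong ring slots. The wrap-around walk, in miniature:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	int write = 6, count = 3, i;

	/* walk 'count' entries from an explicit index, wrapping with modulo */
	for (i = 0; i < count; i++)
		printf("unmap entry %d\n", (write + i) % RING_SIZE);	/* 6 7 0 */
	return 0;
}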
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 45634579adb6..a770bab4d1ed 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2886,6 +2886,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
static int dpaa_phy_init(struct net_device *net_dev)
{
@@ -2893,6 +2894,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
+ u32 phy_vendor;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -2905,9 +2907,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
/* Unless the PHY is capable of rate adaptation */
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
/* remove any features not supported by the controller */
ethtool_convert_legacy_u32_to_link_mode(mask,
mac_dev->if_support);
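The dpaa check masks the PHY ID down to its vendor (OUI) part with GENMASK(31, 10), so any Aquantia model or revision matches. A quick demonstration of the masking (the PHY ID value is invented):

#include <stdint.h>
#include <stdio.h>

/* 32-bit GENMASK(h, l): set bits h..l inclusive */
#define GENMASK32(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
	uint32_t phy_id = 0x31c31cab;	/* hypothetical AQR PHY id */
	uint32_t vendor = phy_id & GENMASK32(31, 10);

	printf("vendor 0x%08x\n", vendor);	/* 0x31c31c00 = PHY_VEND_AQUANTIA2 */
	return 0;
}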
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cd9ec80522e7..75d51572693d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1660,8 +1660,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..e0e8dfd13793 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 4470a4a3e4c3..9f5b921039bd 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
@@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2641,11 +2599,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 29922c20531f..2cfe6944ebd3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
@@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c4a0e836d4f0..bb7750222691 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 582a663ed0ba..f8a2f02ce22d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..dfcaac302e24 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..a5fed00cb971 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,6 +16,7 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
@@ -498,6 +499,9 @@ struct bufdesc_ex {
/* The i.MX8MQ SoC integration mixes the wakeup interrupt signal into the "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -608,6 +612,7 @@ struct fec_enet_private {
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
unsigned int rx_align;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e8e2aa1e7f01..92c55e1a5507 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -111,7 +111,8 @@ static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -3210,6 +3211,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3251,6 +3255,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
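The two fec_main.c hunks above pair a CPU latency QoS request with open/close, gated on the new FEC_QUIRK_HAS_PMQOS quirk. A minimal sketch of that add/remove pairing, assuming a hypothetical demo_priv holder (cpu_latency_qos_add_request() and cpu_latency_qos_remove_request() are the real APIs from linux/pm_qos.h):

#include <linux/pm_qos.h>

struct demo_priv {
	struct pm_qos_request pm_qos_req;	/* hypothetical private state */
};

static void demo_open(struct demo_priv *p)
{
	/* a value of 0 asks cpuidle to avoid deep C-states while we run */
	cpu_latency_qos_add_request(&p->pm_qos_req, 0);
}

static void demo_close(struct demo_priv *p)
{
	/* remove with the same request object that was added */
	cpu_latency_qos_remove_request(&p->pm_qos_req);
}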
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 5ddb769bdfb4..a7f4c3c29f3e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -924,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index f85b5e81dfc1..95f778cce98c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -100,8 +100,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28215f3..3dc3c0b626c2 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Adding the remaining nanoseconds
* to the current timer value yields the next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 152f4d83765a..d37d7a19a759 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *);
+ int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
@@ -136,7 +136,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
}
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->dev.of_node);
+ clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
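The three hunks above migrate callers of mpc5xxx_get_bus_frequency() from passing a struct device_node * to passing a struct device *. As a worked example of the MII divider formula they all share (a sketch; the 132 MHz IPS bus clock is an assumed figure, and dev stands in for the relevant struct device *):

	/* ((bus_freq >> 20) / 5) << 1 with bus_freq = 132000000:
	 *   132000000 >> 20 = 125	(cheap approximate divide by 10^6)
	 *   125 / 5         = 25
	 *   25 << 1         = 50	-> value written to the mii_speed field
	 * keeping the resulting MDC clock near the ~2.5 MHz MDIO ceiling.
	 */
	u32 mii_speed = ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1;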
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 53b7e95213a8..671f51135c26 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -206,9 +206,9 @@ struct funeth_rxq {
#define FUN_QSTAT_READ(q, seq, stats_copy) \
do { \
- seq = u64_stats_fetch_begin(&(q)->syncp); \
+ seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
stats_copy = (q)->stats; \
- } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+ } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
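This funeth hunk, and the gve and hinic hunks that follow, switch stats readers from the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() pair to the _irq variants, which additionally disable interrupts on 32-bit hosts so an IRQ-context writer cannot deadlock the reader. A minimal sketch of the reader-side loop, assuming a hypothetical demo_ring:

#include <linux/u64_stats_sync.h>

struct demo_ring {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void demo_read_stats(struct demo_ring *r, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&r->syncp);
		*pkts = r->packets;	/* snapshot under the seqcount */
		*bytes = r->bytes;
	} while (u64_stats_fetch_retry_irq(&r->syncp, start));
}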
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 50b384910c83..7b9a2d9d9624 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6cafee55efc3..044db3ebb071 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -1274,9 +1274,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
do {
- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8c939628e2d8..2e6461b0ea8b 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index a866bea65110..e5828a658caf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 5051cdff2384..3b6c7b585737 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..10d7a982a5b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 156e92c43780..e9cd0fa6a0d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b36bf9c3e1e4..e3d9804aeb25 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -5907,6 +5909,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5928,10 +5950,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
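As a worked example of the 50 Mbps credit granularity described by the comment above (a sketch: the 325 Mbps request is an assumed figure; I40E_BW_CREDIT_DIVISOR is 50 in this driver):

	u64 credits = 325;	/* requested max_tx_rate in Mbps */

	do_div(credits, I40E_BW_CREDIT_DIVISOR);	/* 325 / 50 = 6 */
	/* the hardware is programmed with 6 credits, so it enforces
	 * 300 Mbps; requests below 50 Mbps are bumped to one credit by
	 * the check above
	 */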
@@ -6657,6 +6679,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
vsi->tc_seid_map[i] = ch->seid;
}
}
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
return ret;
err_free:
@@ -8219,9 +8244,9 @@ config_tc:
if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -10966,10 +10991,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6ba97a0166e..69e67eb6aea7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* set the tx_flags to indicate the IP protocol type. this is
* required so that checksum header computation below is accurate.
@@ -3686,7 +3688,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
u8 prio;
/* is DCB enabled at all? */
- if (vsi->tc_config.numtc == 1)
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4f184c50f6e8..7e9f6a69eb10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * The max frame size is determined from the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware of whether
+ * it's in a port VLAN, so the PF needs to account for this both when checking
+ * the max frame size and when sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 45d097a164ad..0c89f16bf1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
int ret;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (handle_mac)
- goto done;
-
- ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+ iavf_is_mac_set_handled(netdev, addr->sa_data),
+ msecs_to_jiffies(2500));
/* If ret < 0 then it means wait was interrupted.
* If ret == 0 then it means we got a timeout.
@@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (!ret)
return -EAGAIN;
-done:
if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
return -EACCES;
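The rework above moves the condition into wait_event_interruptible_timeout() instead of sampling it up front. A minimal sketch of the return-value contract the surrounding comments rely on, with a hypothetical wait queue and condition, and one reasonable mapping to error codes:

	long ret = wait_event_interruptible_timeout(demo_waitqueue,
						    demo_condition_met(),
						    msecs_to_jiffies(2500));
	if (ret < 0)
		return -EINTR;	/* wait was interrupted by a signal */
	if (ret == 0)
		return -EAGAIN;	/* timed out; condition never became true */
	/* ret > 0: condition became true; ret is the jiffies left over */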
@@ -2367,7 +2364,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
+ goto err;
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -2877,6 +2874,11 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
+ /* Detach interface to avoid subsequent NDO callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
@@ -2884,7 +2886,7 @@ static void iavf_reset_task(struct work_struct *work)
if (adapter->state != __IAVF_REMOVE)
queue_work(iavf_wq, &adapter->reset_task);
- return;
+ goto reset_finish;
}
while (!mutex_trylock(&adapter->client_lock))
@@ -2954,7 +2956,6 @@ continue_reset:
if (running) {
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@@ -3084,14 +3085,21 @@ continue_reset:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- return;
+ goto reset_finish;
reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
- iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
+reset_finish:
+ rtnl_lock();
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
/**
@@ -4085,8 +4093,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in the __IAVF_INIT_CONFIG_ADAPTER state, the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting to the NETDEV_REGISTER event.
+ * We have to bail out here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d18797d25a..18b6a702a1d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or always return
+ * 0 for the head/tail registers so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15ee85dc33bd..5a9e6563923e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cc5b85afd437..841fa149c407 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * The XDP ring is taken from the Rx ring, and the Rx ring is picked based on
+ * the provided queue id. The reason is that a queue vector might have more
+ * than one XDP ring assigned, e.g. when the user reduced the queue count on
+ * the netdev; the Rx ring carries a pointer to one of these XDP rings for its
+ * own purposes, such as handling the XDP_TX action, so we can piggyback here
+ * on the rx_ring->xdp_ring assignment done during XDP rings initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->alloc_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 136d7911adb4..1e3243808178 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -7,18 +7,6 @@
#include "ice_dcb_lib.h"
#include "ice_sriov.h"
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
- return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- return !!rx_ring->rx_buf;
-}
-
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -519,11 +507,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
- kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- if (!ice_alloc_rx_buf_zc(ring))
- return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -538,8 +523,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!ice_alloc_rx_buf(ring))
- return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f9aed0..58d483e2f539 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
*/
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
@@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* at least 1)
*/
if (offset)
- vsi->num_rxq = offset;
+ rx_count = offset;
else
- vsi->num_rxq = num_rxq_per_tc;
+ rx_count = num_rxq_per_tc;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ if (rx_count > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ rx_count, vsi->alloc_rxq);
return -EINVAL;
}
- vsi->num_txq = tx_count;
- if (vsi->num_txq > vsi->alloc_txq) {
+ if (tx_count > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ tx_count, vsi->alloc_txq);
return -EINVAL;
}
+ vsi->num_txq = tx_count;
+ vsi->num_rxq = rx_count;
+
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
@@ -1986,8 +1988,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
return ret;
}
@@ -3181,7 +3183,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
ice_vsi_init_vlan_ops(vsi);
@@ -3490,6 +3492,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u16 new_txq, new_rxq;
u8 netdev_tc = 0;
int i;
@@ -3530,21 +3533,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
}
}
- /* Set actual Tx/Rx queue pairs */
- vsi->num_txq = offset + qcount_tx;
- if (vsi->num_txq > vsi->alloc_txq) {
+ new_txq = offset + qcount_tx;
+ if (new_txq > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ new_txq, vsi->alloc_txq);
return -EINVAL;
}
- vsi->num_rxq = offset + qcount_rx;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ new_rxq = offset + qcount_rx;
+ if (new_rxq > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ new_rxq, vsi->alloc_rxq);
return -EINVAL;
}
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = new_txq;
+ vsi->num_rxq = new_rxq;
+
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
@@ -3576,6 +3582,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
+ struct ice_tc_cfg old_tc_cfg;
struct ice_vsi_ctx *ctx;
struct device *dev;
int i, ret = 0;
@@ -3600,6 +3607,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs[i] = vsi->num_txq;
}
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
@@ -3616,8 +3624,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
else
ret = ice_vsi_setup_q_map(vsi, ctx);
- if (ret)
+ if (ret) {
+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
goto out;
+ }
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -4062,7 +4072,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
if (err && err != -EEXIST)
return err;
- return 0;
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index eb40526ee179..e109cb93886b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- return status;
+ return 0;
}
/**
@@ -2397,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
- ice_unplug_aux_dev(pf);
-
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2579,7 +2579,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2586,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
@@ -2683,6 +2675,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -2887,10 +2896,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ /* reallocate Rx queues that are used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
} else {
/* safe to call even when prog == vsi->xdp_prog as
* dev_xdp_install in net/core/dev.c incremented prog's
@@ -3573,6 +3590,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
@@ -3886,7 +3911,7 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
@@ -6624,7 +6649,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+ int i, tx_err, rx_err, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
@@ -6658,20 +6683,13 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- link_err = ice_force_phys_link_state(vsi, false);
- if (link_err)
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
- }
-
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err || vlan_err) {
+ if (tx_err || rx_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6833,6 +6851,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8865,6 +8885,16 @@ int ice_stop(struct net_device *netdev)
return -EBUSY;
}
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ return -EIO;
+ }
+ }
+
ice_vsi_close(vsi);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fce204693dbb..3808034f7e7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4445,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -4971,7 +4978,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+ bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 836dce840712..97453d1dfafe 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
spin_lock(&xdp_ring->tx_lock);
} else {
+ /* This should generally not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
xdp_ring = vsi->xdp_rings[queue_index];
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e37f5e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
- if (WARN_ON(!vsi))
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
+ }
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops;
- int err;
+ int err = 0;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_tx_filtering(vsi);
- if (err)
- return err;
+ /* Allow a VF that has only VLAN 0 to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
return ice_cfg_mac_antispoof(vsi, true);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 094e3c97a1ea..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN filtering on first non-zero VLAN */
if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
/* Disable VLAN filtering when only VLAN 0 is left */
- if (!ice_vsi_has_non_zero_vlans(vsi))
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
- ice_vf_dis_vlan_promisc(vsi, &vlan);
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
}
}
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (err)
return err;
}
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
- if (err)
- return err;
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..03ce85f6e6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
+ ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
@@ -243,7 +244,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -317,6 +318,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
}
/**
+ * ice_realloc_rx_xdp_bufs - reallocate software ring for either XSK or normal buffers
+ * @rx_ring: Rx ring
+ * @pool_present: is an XSK pool present
+ *
+ * Try to allocate the new software ring and, if that fails, return -ENOMEM.
+ * On success, substitute the old buffer array with the newly allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+ sizeof(*rx_ring->rx_buf);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ rx_ring->xdp_buf = sw_ring;
+ } else {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ rx_ring->rx_buf = sw_ring;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate the buffer array for each rx_ring that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+ struct ice_rx_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+ rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
* @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -329,6 +386,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
!is_power_of_2(vsi->tx_rings[qid]->count)) {
netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -339,11 +402,17 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_pool_if_up;
}
+
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+ if (ret)
+ goto xsk_pool_if_up;
}
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
@@ -353,7 +422,7 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
@@ -944,13 +1013,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (!ice_is_xdp_ena_vsi(vsi))
return -EINVAL;
- if (queue_id >= vsi->num_txq)
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -EINVAL;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 21faec8e97db..4edbe81eb646 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -27,6 +27,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
static inline bool
ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
@@ -72,5 +73,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+ bool __always_unused zc)
+{
+ return 0;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d8b836a85cc3..2796e81d2726 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -7958,8 +7965,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
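
For context, vfs_lock closes a race between igb_disable_sriov() freeing
vf_data and igb_msg_task() walking it; the _irqsave variant is used because
the mailbox path can run with interrupts disabled. A reduced userspace sketch
of the same guard, with a pthread mutex standing in for the spinlock (names
are hypothetical, not igb code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
static int *vf_data;	/* stand-in for adapter->vf_data */
static int vf_count;

/* Teardown: free the VF array only while holding the lock. */
static void disable_vfs(void)
{
	pthread_mutex_lock(&vfs_lock);
	free(vf_data);
	vf_data = NULL;
	vf_count = 0;
	pthread_mutex_unlock(&vfs_lock);
}

/* Message task: iterate the VF array under the same lock, so it can
 * never observe a half-torn-down state.
 */
static void msg_task(void)
{
	pthread_mutex_lock(&vfs_lock);
	for (int vf = 0; vf < vf_count; vf++)
		vf_data[vf]++;	/* stand-in for per-VF mailbox handling */
	pthread_mutex_unlock(&vfs_lock);
}

int main(void)
{
	vf_count = 2;
	vf_data = calloc(vf_count, sizeof(*vf_data));
	msg_task();
	disable_vfs();
	return 0;
}
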
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9f06896a049b..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1293,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 5edb68a8aab1..57f27cc7724e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
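
The one-line fix above puts the old buffer back when the replacement
allocation fails, so the DMA ring never ends up with a NULL slot. A small
sketch of that refill idiom (refill_slot() is a hypothetical stand-in):

#include <stdio.h>
#include <stdlib.h>

/* Refill one ring slot: the old buffer is handed to the caller only if
 * a replacement was allocated; on failure the old one is put back so
 * the ring never holds a NULL pointer.
 */
static void *refill_slot(void **slot, size_t size)
{
	void *old = *slot;

	*slot = malloc(size);	/* mirrors ch->rx_buff[desc] = alloc(...) */
	if (!*slot) {
		*slot = old;	/* restore: keep the ring slot valid */
		return NULL;	/* caller must not consume the packet */
	}
	return old;
}

int main(void)
{
	void *slot = malloc(64);
	void *pkt = refill_slot(&slot, 64);

	printf("got packet: %s\n", pkt ? "yes" : "no");
	free(pkt);
	free(slot);
	return 0;
}
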
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
}
skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
continue;
if (ret != XRX200_DMA_PACKET_COMPLETE)
- return ret;
+ break;
rx++;
} else {
break;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..0eec05d905eb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ static struct dentry *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
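
Caching the root dentry in a function-local static avoids the
debugfs_lookup() call, which returned the dentry with an extra reference that
this path never dropped. A toy illustration of the create-once idiom (plain C
stand-ins, not the debugfs API):

#include <stdio.h>

struct dir { const char *name; };

static struct dir *create_dir(const char *name)
{
	static struct dir d;	/* stand-in for a debugfs dentry */

	d.name = name;
	return &d;
}

/* Create-once idiom: the first caller creates the shared root and later
 * callers reuse the cached pointer, with no lookup (and no stray
 * reference) involved.
 */
static struct dir *get_root(void)
{
	static struct dir *root;

	if (!root)
		root = create_dir("mvpp2");
	return root;
}

int main(void)
{
	printf("same root: %d\n", get_root() == get_root());
	return 0;
}
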
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 6809b8b4c556..7282a826d81e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2580,6 +2580,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid a leak.
+ * Since the LF is detached, pass -1 as the LF number.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 583ead4dd246..1e348fd0d930 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1097,6 +1097,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
/* Delete multicast and promisc MCAM entries */
@@ -1136,6 +1139,9 @@ bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bo
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
* in set_rx_mode mbox handler.
*/
@@ -1675,7 +1681,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
* Firmware database method.
* Default KPU profile.
*/
- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
@@ -1939,6 +1945,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
u64 nibble_ena, rx_kex, tx_kex;
@@ -1951,15 +1958,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
if (nibble_ena) {
tx_kex &= ~NPC_PARSE_NIBBLE;
tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
}
/* Configure RX interfaces */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index a400aa22da79..7c4e1acd0f77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -467,7 +467,8 @@ do { \
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index fb8db5888d2f..d686c7b6252f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -632,6 +632,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -641,11 +647,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
-
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
@@ -1591,6 +1598,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+
+ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e795f9ee76dd..b28029cc4316 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -195,6 +195,7 @@ struct otx2_hw {
u16 sqb_size;
/* NIX */
+ u8 txschq_link_cfg_lvl;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index ede3e53b9790..a895862b4821 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -368,6 +368,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
if (!sw->np)
return 0;
+ of_node_get(sw->np);
ports = of_find_node_by_name(sw->np, "ports");
for_each_child_of_node(ports, node) {
@@ -417,6 +418,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
}
out:
+ of_node_put(node);
of_node_put(ports);
return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index f538a749ebd4..59470d99f522 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id prestera_pci_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d9426b01f462..b344632beadd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1458,7 +1458,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return !eth->hwlro;
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@@ -1732,7 +1732,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
case XDP_TX: {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
- if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
act = XDP_DROP;
break;
@@ -1891,10 +1891,19 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
- else
+ } else {
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd4;
+ }
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1902,16 +1911,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- }
-
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb,
- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+ mtk_ppe_check_skb(eth->ppe, skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 7405c97cda66..ecf85e9ed824 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -314,6 +314,11 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index dab8f3f771f8..cfe804bc8d20 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -412,7 +412,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
if (entry->hash != 0xffff) {
ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_BIND);
+ MTK_FOE_STATE_UNBIND);
dma_wmb();
}
entry->hash = 0xffff;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 1f5cf1c9a947..69ffce04d630 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -293,6 +293,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
if (!ppe)
return;
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
+
now = (u16)jiffies;
diff = now - ppe->foe_check_time[hash];
if (diff < HZ / 10)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb1..dcb9eb1899ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
- dev->caps.num_ports);
+ (unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 37522352e4b2..c8e5ca65bb6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -79,6 +79,10 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
+ err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ if (err)
+ return err;
+
err = fill_meter_params_from_act(act, &params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index d87bbb0be7c8..e6f64d890fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -506,7 +506,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int err;
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
@@ -620,7 +620,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 6b6c7044b64a..3a1f76eac542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -246,7 +246,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
- struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
struct mlx5e_async_ctx *bulk_async;
int i;
@@ -255,7 +255,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
return;
i = 0;
- list_for_each_entry(obj, list, list_node) {
+ list_for_each_entry_safe(obj, n, list, list_node) {
mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
i++;
}
@@ -808,6 +808,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
int datalen;
u32 seq;
@@ -819,7 +820,12 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
mlx5e_tx_mpwqe_ensure_complete(sq);
tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't WARN on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather, continue processing this packet.
+ */
+ if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e2a9b9be5c1f..e0ce5a233d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1395,10 +1395,11 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
}
return fs;
-err_free_fs:
- kvfree(fs);
+
err_free_vlan:
mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
err:
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d858667736a3..02eb2f0fa2ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3682,7 +3682,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+ MLX5_TC_FLAG(NIC_OFFLOAD);
+ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -4769,14 +4771,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
- /* HW LRO */
- if (MLX5_CAP_ETH(mdev, lro_cap) &&
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- /* No XSK params: checking the availability of striding RQ in general. */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->packet_merge.type = slow_pci_heuristic(mdev) ?
- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
- }
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4c1599de652c..759f7d3c2cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -662,6 +662,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -696,6 +698,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -708,12 +717,21 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
@@ -836,13 +854,6 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
- if (!priv->fs) {
- netdev_err(priv->netdev, "FS allocation failed\n");
- return -ENOMEM;
- }
-
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed73132129aa..a9f4c652f859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -427,7 +427,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (mlx5_lag_mpesw_is_activated(esw->dev))
+ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+ mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
@@ -3115,8 +3116,10 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
+ if (err) {
+ devl_unlock(devlink);
return;
+ }
}
esw->esw_funcs.num_vfs = new_num_vfs;
devl_unlock(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 0f34e3c80d1f..065102278cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
+ unsigned long flags;
if (fn >= ldev->ports)
return;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
}
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1234,7 +1236,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
+ if (!ldev->pf[i].netdev)
break;
if (i >= ldev->ports)
@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev) &&
dev == ldev->pf[MLX5_LAG_P1].dev;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev) &&
test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return ndev;
}
@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
u8 port = 0;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
port = ldev->v2p_map[port * ldev->buckets];
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
ldev->pf[MLX5_LAG_P1].dev;
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev;
+ unsigned long flags;
int num_ports;
int ret, i, j;
void *out;
@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bec8d6d0b5f6..89b2d9cea33f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -494,6 +494,24 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+
+ if (!err)
+ return val.vbool;
+
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return MLX5_CAP_GEN(dev, roce);
+}
+EXPORT_SYMBOL(mlx5_is_roce_on);
+
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
@@ -597,7 +615,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported))
- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+ mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev);
if (max_uc_list > 0)
@@ -623,7 +642,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
*/
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
(!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}
@@ -1530,7 +1549,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
+ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1597,6 +1618,7 @@ err_timeout_init:
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
return err;
}
@@ -1618,6 +1640,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index ec76a8b1acc1..60596357bfc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -376,8 +376,8 @@ retry:
goto out_dropped;
}
}
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_dropped;
@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.reclaim_pages_discard += npages;
}
/* if triggered by FW event and failed by FW then ignore */
- if (event && err == -EREMOTEIO)
+ if (event && err == -EREMOTEIO) {
err = 0;
+ goto out_free;
+ }
+
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index ee2e1b7c1310..c0e6c487c63c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -159,11 +159,11 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
- devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 5fdf9b7179f5..5a1027b07215 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -75,6 +75,7 @@ struct mlxbf_gige {
struct net_device *netdev;
struct platform_device *pdev;
void __iomem *mdio_io;
+ void __iomem *clk_io;
struct mii_bus *mdiobus;
spinlock_t lock; /* for packet processing indices */
u16 rx_q_entries;
@@ -137,7 +138,8 @@ enum mlxbf_gige_res {
MLXBF_GIGE_RES_MDIO9,
MLXBF_GIGE_RES_GPIO0,
MLXBF_GIGE_RES_LLU,
- MLXBF_GIGE_RES_PLU
+ MLXBF_GIGE_RES_PLU,
+ MLXBF_GIGE_RES_CLK
};
/* Version of register data returned by mlxbf_gige_get_regs() */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 2e6c1b7af096..4aeb927c3715 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -22,10 +22,23 @@
#include <linux/property.h>
#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
+#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
+#define MLXBF_GIGE_MDC_CLK_NS 400
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
+#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
+#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
+#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
+#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
+
/* Support clause 22 */
#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
@@ -50,27 +63,76 @@
#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
+#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+
+static struct resource corepll_params[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .start = MLXBF_GIGE_BF2_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ },
+};
+
+/* Returns core clock i1clk in Hz */
+static u64 calculate_i1clk(struct mlxbf_gige *priv)
+{
+ u8 core_od, core_r;
+ u64 freq_output;
+ u32 reg1, reg2;
+ u32 core_f;
+
+ reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
+ reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
+
+ core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_F_SHIFT;
+ core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_R_SHIFT;
+ core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
+
+ /* Compute the PLL output frequency as follows:
+ *
+ * CORE_F / 16384
+ * freq_output = freq_reference * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ */
+ freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
+ MLXBF_GIGE_MDIO_COREPLL_CONST);
+ freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
+
+ return freq_output;
+}
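
To make the formula concrete, here is a standalone worked example; the
CORE_F/CORE_R/CORE_OD field values below are invented for illustration rather
than read from the PLL registers:

#include <stdio.h>
#include <stdint.h>

#define FREQ_REFERENCE	156250000ULL	/* 156.25 MHz, as in the driver */
#define COREPLL_CONST	16384ULL

int main(void)
{
	/* Example field values (illustrative only): */
	uint32_t core_f = 45056;	/* feedback divider */
	uint8_t core_r = 0;		/* reference divider */
	uint8_t core_od = 0;		/* output divider */

	/* freq = ref * (CORE_F / 16384) / ((CORE_R + 1) * (CORE_OD + 1)) */
	uint64_t freq = FREQ_REFERENCE * core_f / COREPLL_CONST;

	freq /= (uint64_t)(core_r + 1) * (core_od + 1);
	printf("i1clk = %llu Hz\n", (unsigned long long)freq);
	/* With these values: 156250000 * 45056 / 16384 = 429687500 Hz,
	 * i.e. roughly the 430 MHz the old hard-coded constant assumed.
	 */
	return 0;
}
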
+
/* Formula for encoding the MDIO period. The encoded value is
* passed to the MDIO config register.
*
- * mdc_clk = 2*(val + 1)*i1clk
+ * mdc_clk = 2*(val + 1)*(core clock period in s)
*
- * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ * i1clk is in Hz:
+ * 400 ns = 2*(val + 1)*(1/i1clk)
*
- * val = (((400 * 430 / 1000) / 2) - 1)
+ * val = (((400/10^9) / (1/i1clk) / 2) - 1)
+ * val = (400/2 * i1clk)/10^9 - 1
*/
-#define MLXBF_GIGE_I1CLK_MHZ 430
-#define MLXBF_GIGE_MDC_CLK_NS 400
+static u8 mdio_period_map(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u64 i1clk;
-#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+ i1clk = calculate_i1clk(priv);
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
- MLXBF_GIGE_MDIO_PERIOD) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+ mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
+
+ return mdio_period;
+}
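
Feeding the ~429.69 MHz i1clk from the example above into the period formula
shows how the encoded value is derived at runtime (standalone arithmetic
check, illustrative only):

#include <stdio.h>
#include <stdint.h>

#define MDC_CLK_NS	400	/* target MDC period, as in the driver */

int main(void)
{
	uint64_t i1clk = 429687500ULL;	/* from the PLL example above */

	/* val = ((MDC_CLK_NS / 2) * i1clk) / 10^9 - 1 */
	uint8_t val = (uint8_t)((MDC_CLK_NS / 2) * i1clk / 1000000000ULL - 1);

	printf("encoded MDC period = %u\n", val);	/* prints 84 */
	/* The old fixed macro, computed from a rounded 430 MHz, yielded 85;
	 * the runtime calculation truncates to 84 for this exact i1clk.
	 */
	return 0;
}
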
static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
int phy_reg, u32 opcode)
@@ -117,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ /* The MDIO lock is set on read. To release it, clear the gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
@@ -124,9 +189,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
int phy_reg, u16 val)
{
struct mlxbf_gige *priv = bus->priv;
+ u32 temp;
u32 cmd;
int ret;
- u32 temp;
if (phy_reg & MII_ADDR_C45)
return -EOPNOTSUPP;
@@ -141,21 +206,50 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
5, 1000000);
+ /* The MDIO lock is set on read. To release it, clear the gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
+static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u32 val;
+
+ mdio_period = mdio_period_map(priv);
+
+ val = MLXBF_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+}
+
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
{
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
- /* Configure mdio parameters */
- writel(MLXBF_GIGE_MDIO_CFG_VAL,
- priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ /* The clk resource is shared with other drivers, so we cannot use
+ * devm_platform_ioremap_resource().
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
+ if (!res) {
+ /* For backward compatibility with older ACPI tables, also keep
+ * the CLK resource internal to the driver.
+ */
+ res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ }
+
+ priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (IS_ERR(priv->clk_io))
+ return PTR_ERR(priv->clk_io);
+
+ mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
if (!priv->mdiobus) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 5fb33c9294bf..7be3a793984d 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,6 +8,8 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#define MLXBF_GIGE_VERSION 0x0000
+#define MLXBF_GIGE_VERSION_BF2 0x0
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9bf584234a6..bb1cd4bae82e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -328,7 +328,6 @@ static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -357,7 +356,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
err = mlxsw_m_port_create(mlxsw_m,
@@ -367,7 +365,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
- devl_unlock(devlink);
return 0;
@@ -377,7 +374,6 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
- devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -390,10 +386,8 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -401,7 +395,6 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
- devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e240cdd9cbd..30c7b0e15721 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 2e0b704b8a31..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
* enabled.
*/
struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
};
struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_ptp_traps_set;
refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
return &ptp_state->common;
err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ mutex_destroy(&ptp_state->lock);
mlxsw_sp_ptp_traps_unset(mlxsw_sp);
kfree(ptp_state);
}
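
The new per-ptp_state mutex replaces the earlier reliance on RTNL (note the
ASSERT_RTNL removals below), and mlxsw_sp2_ptp_hwtstamp_set() releases it on
every path via goto labels. A compact sketch of that single-unlock
error-handling shape (do_config() is a hypothetical stand-in, not mlxsw code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Single-unlock error handling: every failure path funnels through one
 * labelled exit so the mutex can never leak out of the function.
 */
static int do_config(int fail_step)
{
	int err = 0;

	pthread_mutex_lock(&lock);

	if (fail_step == 1) {
		err = -1;
		goto out;
	}
	if (fail_step == 2) {
		err = -2;
		goto out;
	}
	/* success path: commit the new configuration here */
out:
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	return do_config(0);
}
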
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
*config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
return 0;
}
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
return -EINVAL;
}
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
*p_ing_types = ing_types;
*p_egr_types = egr_types;
return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config)
{
+ struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
&new_egr_types, &rx_filter);
if (err)
- return err;
+ goto err_get_message_types;
new_config.flags = config->flags;
new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
new_egr_types, new_config);
if (err)
- return err;
+ goto err_configure_port;
} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
if (err)
- return err;
+ goto err_deconfigure_port;
}
mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
/* Notify the ioctl caller what we are actually timestamping. */
config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 2d1628fdefc1..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 39904dacf4f0..b3472fb94617 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+ 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 6dea7f8c1481..51f8a0816377 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -425,7 +425,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
lan966x_ifh_get_src_port(skb->data, &src_port);
lan966x_ifh_get_timestamp(skb->data, &timestamp);
- WARN_ON(src_port >= lan966x->num_phys_ports);
+ if (WARN_ON(src_port >= lan966x->num_phys_ports))
+ goto free_skb;
skb->dev = lan966x->ports[src_port]->dev;
skb_pull(skb, IFH_LEN * sizeof(u32));
@@ -449,6 +450,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
return skb;
+free_skb:
+ kfree_skb(skb);
unmap_page:
dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
FDMA_DCB_STATUS_BLOCKL(db->status),
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1d6e3b641b2e..d928b75f3780 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -710,7 +710,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
@@ -718,10 +718,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (lan966x->fdma)
devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
- if (lan966x->ptp_irq)
+ if (lan966x->ptp_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
- if (lan966x->ptp_ext_irq)
+ if (lan966x->ptp_ext_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
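
The `> 0` tests above match platform_get_irq_byname()'s contract: it returns
a negative errno on failure, and 0 is not a valid IRQ number either, so only
strictly positive values may be requested or freed. A toy sketch of that
check (get_irq() is a hypothetical stand-in):

#include <stdio.h>

/* Stand-in for platform_get_irq_byname(): negative errno on failure,
 * a strictly positive number on success.
 */
static int get_irq(const char *name)
{
	return name[0] == 'a' ? 17 : -6 /* -ENXIO */;
}

int main(void)
{
	int irq = get_irq("ana");

	if (irq > 0)		/* 0 and negatives are both invalid */
		printf("request irq %d\n", irq);
	else
		printf("optional irq missing (%d), skip it\n", irq);
	return 0;
}
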
@@ -1049,7 +1049,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 304f84aadc36..21844beba72d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -113,6 +113,8 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* This assumes STATUS_WORD_POS == 1, Status
* just after last data
*/
+ if (!byte_swap)
+ val = ntohl((__force __be32)val);
byte_cnt -= (4 - XTR_VALID_BYTES(val));
eof_flag = true;
break;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 5f9240182351..a6f99b4344d9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -397,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg)
break;
}
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+
mana_gd_process_eqe(eq);
eq->head++;
@@ -1134,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading completion info
+ */
+ rmb();
+
comp->wq_num = cqe->cqe_info.wq_num;
comp->is_sq = cqe->cqe_info.is_sq;
memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
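
The added rmb() calls enforce "check owner_bits, then read the entry":
without the barrier the CPU may load EQE/CQE contents before the ownership
check confirms the device finished writing them. A userspace sketch of the
same check-then-barrier-then-read idiom, with a C11 acquire load standing in
for rmb() (illustrative only, not GDMA code):

#include <stdatomic.h>
#include <stdint.h>

struct entry {
	_Atomic uint32_t owner_bits;	/* written last by the device */
	uint32_t payload;		/* written first by the device */
};

/* Returns 1 and copies the payload only when ownership says the entry
 * is complete; the acquire ordering plays the role of rmb() here.
 */
static int read_entry(struct entry *e, uint32_t expected, uint32_t *out)
{
	if (atomic_load_explicit(&e->owner_bits,
				 memory_order_acquire) != expected)
		return 0;
	/* The acquire load above orders this read after the check. */
	*out = e->payload;
	return 1;
}

int main(void)
{
	struct entry e = { 0 };
	uint32_t v;

	e.payload = 42;			/* "device" writes data first */
	atomic_store_explicit(&e.owner_bits, 1,
			      memory_order_release);	/* then ownership */
	return read_entry(&e, 1, &v) ? 0 : 1;
}
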
@@ -1465,10 +1475,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
static const struct pci_device_id mana_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a762e4b..9e57d23e57bf 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
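
The common thread in the moxart changes above is that every streaming-DMA call now uses &priv->pdev->dev instead of &ndev->dev: a net_device's embedded device is a class device with no DMA ops, so mappings must be created, synced and torn down against the platform device that actually probed the hardware, and always against the same struct device. A hedged sketch of the pairing, with illustrative buffer names:

	/* Sketch: map, sync and unmap against the same DMA-capable device. */
	struct device *dma_dev = &pdev->dev;	/* the probing platform device */
	dma_addr_t handle = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dma_dev, handle))
		return -ENOMEM;
	dma_sync_single_for_cpu(dma_dev, handle, len, DMA_FROM_DEVICE);
	/* ... consume buf ... */
	dma_unmap_single(dma_dev, handle, len, DMA_FROM_DEVICE);
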
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d4649e4ee0e7..306026e6aa11 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1860,16 +1860,20 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ocelot->num_stats; i++)
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
+ }
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * ocelot->num_stats;
+ unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int err, j;
@@ -1877,9 +1881,8 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- region->offset, region->buf,
- region->count);
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
if (err)
return err;
@@ -1906,13 +1909,13 @@ static void ocelot_check_stats_work(struct work_struct *work)
stats_work);
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
err = ocelot_port_update_stats(ocelot, i);
if (err)
break;
}
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1925,16 +1928,22 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
/* check and update now */
err = ocelot_port_update_stats(ocelot, port);
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1943,10 +1952,16 @@ EXPORT_SYMBOL(ocelot_get_ethtool_stats);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ int i, num_stats = 0;
+
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return ocelot->num_stats;
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
}
EXPORT_SYMBOL(ocelot_get_sset_count);
@@ -1958,8 +1973,11 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->stats_regions);
- for (i = 0; i < ocelot->num_stats; i++) {
- if (region && ocelot->stats_layout[i].offset == last + 1) {
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -1967,12 +1985,12 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- region->offset = ocelot->stats_layout[i].offset;
+ region->base = ocelot->stats_layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot->stats_layout[i].offset;
+ last = ocelot->stats_layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
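
With .reg now holding a register address rather than an index, adjacency is detected as reg == last + 4 (consecutive 32-bit registers sit 4 bytes apart), and each ocelot_stats_region then describes one contiguous run that a single bulk read can fetch. The grouping logic, reduced to a sketch over a sorted address array (addrs[] and new_region() are illustrative):

	/* Sketch: fold sorted register addresses into (base, count) runs. */
	for (i = 0; i < n; i++) {
		if (i > 0 && addrs[i] == addrs[i - 1] + 4)
			region->count++;		/* extends the current run */
		else
			region = new_region(addrs[i]);	/* count starts at 1 */
	}
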
@@ -3340,7 +3358,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@ -3353,17 +3370,13 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->num_stats = 0;
- for_each_stat(ocelot, stat)
- ocelot->num_stats++;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
sizeof(u64), GFP_KERNEL);
if (!ocelot->stats)
return -ENOMEM;
- mutex_init(&ocelot->stats_lock);
+ spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
@@ -3511,7 +3524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 5e6136e80282..330d30841cdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -725,37 +725,42 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
+ u64 *s;
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
+ spin_lock(&ocelot->stats_lock);
+
+ s = &ocelot->stats[port * OCELOT_NUM_STATS];
/* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
stats->rx_dropped = dev->stats.rx_dropped;
/* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 961f803aca19..9c488953f541 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -96,101 +96,379 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static void ocelot_pll5_init(struct ocelot *ocelot)
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9cf82ecf191c 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 4e5df9f2c372..7b92026e1a6f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -127,10 +127,11 @@ static int nfp_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
+ act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not pipe or ok");
+ "Offload not supported when conform action is not continue, pipe or ok");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index cf4d6f1129fa..349a2b1a19a2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1630,21 +1630,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
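
Switching to the _irq variants matters mostly on 32-bit kernels, where the 64-bit counters are protected by a seqcount: if the writer can run from IRQ context, a reader that does not block interrupts can deadlock or spin indefinitely. The canonical reader shape, sketched with stand-in names for the per-ring counters:

	/* Sketch: retry until a consistent snapshot is observed. */
	unsigned int start;
	u64 pkts, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&ring->sync);
		pkts  = ring->pkts;
		bytes = ring->bytes;
	} while (u64_stats_fetch_retry_irq(&ring->sync, start));
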
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index c922dfab8080..b1b1b648e40c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -649,7 +649,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -657,10 +657,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -670,7 +670,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -1395,6 +1395,8 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ /* update port state to get latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 34c0d2ddf9ef..a8286d0032d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
}
/* Adjust the start address to be cache size aligned */
- cache->id = id;
cache->addr = addr & ~(u64)(cache->size - 1);
/* Re-init to the new ID and address */
@@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
return NULL;
}
+ cache->id = id;
+
exit:
/* Adjust offset */
*offset = addr - cache->addr;
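
The reordering above makes cache->id the last thing written, so a failed acquire cannot leave a cache entry advertising an ID whose window was never mapped. The adjacent context line also uses the standard power-of-two rounding idiom; as a worked sketch:

	/* Sketch: round addr down to a power-of-two 'size' boundary. */
	u64 aligned = addr & ~(u64)(size - 1);
	/* e.g. addr = 0x12345, size = 0x1000 -> aligned = 0x12000 */
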
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index edd300033735..4cc38799eabc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -507,6 +507,7 @@ int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
nfp_err(nfp_nsp_cpp(nsp),
"set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 1443f788ee37..0be79c516781 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1564,8 +1564,67 @@ static int ionic_set_features(struct net_device *netdev,
return err;
}
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+
+ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+ return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+ u8 get_mac[ETH_ALEN];
+ int err;
+
+ err = ionic_set_attr_mac(lif, mac);
+ if (err)
+ return err;
+
+ err = ionic_get_attr_mac(lif, get_mac);
+ if (err)
+ return err;
+
+ /* To deal with older firmware that silently ignores the set attr mac:
+ * doesn't actually change the mac and doesn't return an error, so we
+ * do the get attr to verify whether or not the set actually happened
+ */
+ if (!ether_addr_equal(get_mac, mac))
+ return 1;
+
+ return 0;
+}
+
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
+ struct ionic_lif *lif = netdev_priv(netdev);
struct sockaddr *addr = sa;
u8 *mac;
int err;
@@ -1574,6 +1633,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
if (ether_addr_equal(netdev->dev_addr, mac))
return 0;
+ err = ionic_program_mac(lif, mac);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+ netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
+ __func__);
+
err = eth_prepare_mac_addr_change(netdev, addr);
if (err)
return err;
@@ -2963,6 +3030,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
err = ionic_qcqs_alloc(lif);
if (err)
goto err_unlock;
@@ -3169,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
.attr = IONIC_LIF_ATTR_MAC,
},
};
+ u8 mac_address[ETH_ALEN];
struct sockaddr addr;
int err;
@@ -3177,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
return err;
netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
ctx.comp.lif_getattr.mac);
- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
- return 0;
+ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+ if (is_zero_ether_addr(mac_address)) {
+ eth_hw_addr_random(netdev);
+ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
+ ether_addr_copy(mac_address, netdev->dev_addr);
+
+ err = ionic_program_mac(lif, mac_address);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+ netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
+ __func__);
+ return 0;
+ }
+ }
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
@@ -3186,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
- netdev->dev_addr))
+ if (!ether_addr_equal(mac_address, netdev->dev_addr))
ionic_lif_addr_add(lif, netdev->dev_addr);
} else {
/* Update the netdev mac with the device's mac */
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ ether_addr_copy(addr.sa_data, mac_address);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 4029b4e021f8..56f93b030551 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -474,8 +474,8 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
@@ -488,6 +488,8 @@ try_again:
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b357ac4c56c5..7e32b04eb0c7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 67ade78fb767..7fd8828d3a84 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bc70c6abd6a5..58cf7cc54f40 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 032b8c0bd788..5b4d661ab986 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 017212a40df3..f54ebd007286 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
index e166dcb9b99c..91e87594ed1e 100644
--- a/drivers/net/ethernet/sfc/siena/tx.c
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index d12474042c84..c5f88f7a7a04 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 3bf20211cceb..3829c2805b16 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1037,6 +1037,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@@ -2587,6 +2589,8 @@ static int smsc911x_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ if (!device_may_wakeup(dev))
+ phy_stop(ndev->phydev);
}
/* enable wake on LAN, energy detection and the external PME
@@ -2628,6 +2632,8 @@ static int smsc911x_resume(struct device *dev)
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
+ if (!device_may_wakeup(dev))
+ phy_start(ndev->phydev);
}
return 0;
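
ravb, sh_eth and smsc911x all gain the same line before phy_attached_info(): setting phydev->mac_managed_pm tells the PHY core that the MAC driver handles PHY suspend/resume itself, so mdio_bus_phy_suspend()/resume() does not suspend the PHY a second time. smsc911x pairs it with explicit phy_stop()/phy_start() calls keyed on device_may_wakeup(); the shape, sketched:

	/* Sketch: MAC-managed PHY power management. */
	phydev->mac_managed_pm = true;		/* set after connecting the PHY */

	/* in the MAC suspend handler (mirrored by phy_start() on resume): */
	if (!device_may_wakeup(dev))
		phy_stop(ndev->phydev);
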
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 52f9ed8db9c9..9af25be42401 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1134,9 +1134,8 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
- pcim_iounmap_regions(pdev, BIT(0));
}
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index caa4bfc4c1d6..9b6138b11776 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
if (enable)
value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
}
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 070b5ef165eb..592d29abcb1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -986,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1064,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
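
Both stmmac hunks apply the same micro-pattern: snapshot the control register, derive the new value, and issue the MMIO write only when something actually changed, avoiding redundant (and on some interconnects, slow) register writes in the link-up path. Reduced to a sketch with illustrative register and bit names:

	/* Sketch: read-modify-write that skips unchanged writes. */
	u32 old_val = readl(base + CTRL);
	u32 val = (old_val & ~CLEAR_MASK) | set_bits;

	if (val != old_val)
		writel(val, base + CTRL);
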
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 8594ee839628..88aa0d310aee 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
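
In the copy-break path above, the frame data starts 2 bytes into the DMA mapping (the alignment pad that copy_skb also reserves), so syncing only 'len' bytes from the mapping start misses the last 2 bytes of the frame; hence len + 2. Sketched with an explicit pad variable:

	/* Sketch: the sync must cover pad + frame, not just the frame. */
	dma_sync_single_for_cpu(dev, dma_addr, len + pad, DMA_FROM_DEVICE);
	skb_copy_from_linear_data(skb, copy_skb->data, len);
	dma_sync_single_for_device(dev, dma_addr, len + pad, DMA_FROM_DEVICE);
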
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index baa1f0a5cc37..b4a4fa0a58f8 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -7,12 +7,12 @@ config NET_VENDOR_WANGXUN
bool "Wangxun devices"
default y
help
- If you have a network (Ethernet) card belonging to this class, say Y.
+ If you have a network (Ethernet) card from Wangxun(R), say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Intel cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Wangxun(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
if NET_VENDOR_WANGXUN
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific before it left the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 018d365f9deb..7962c37b3f14 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -797,7 +797,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ __u8 *full_tos)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -823,6 +824,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
+ if (full_tos)
+ *full_tos = tos;
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
@@ -876,8 +879,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
@@ -911,6 +913,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
+ __u8 full_tos;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
@@ -921,7 +924,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, &full_tos);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -965,7 +968,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -1149,7 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
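
RT_TOS() masks its argument with IPTOS_TOS_MASK (0x1e), which strips the two ECN bits, so the route lookup key and the value fed to ip_tunnel_ecn_encap() must diverge: the new full_tos out-parameter carries the unmasked TOS for ECN encapsulation while fl4->flowi4_tos keeps the masked value. A small worked sketch (value illustrative):

	/* Sketch: RT_TOS() drops ECN bits, which ECN encap still needs. */
	u8 tos = 0x2e;			/* DSCP field with ECN bits set */
	u8 route_tos = RT_TOS(tos);	/* 0x0e: ECN bits stripped */
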
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 6afdf1622944..5cf218c674a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1310,10 +1310,11 @@ static void adf7242_remove(struct spi_device *spi)
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 42c0b451088d..450b16ad40a4 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2293,7 +2293,7 @@ static int ca8210_set_csma_params(
* @retries: Number of retries
*
* Sets the number of times to retry a transmission if no acknowledgment was
- * was received from the other end when one was requested.
+ * received from the other end when one was requested.
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 1e1f40f628a0..c69b87d3837d 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1e9eae208e44..53a1dbeaffa6 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -568,7 +568,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
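
Dropping the early & PAGE_MASK keeps the exact SMEM physical address (which need not be page-aligned), and the mapping is then widened to page granularity: round the address down, grow the size by however much that rounding added, and page-align the result. A worked sketch, assuming PAGE_SIZE = 4096:

	/* Sketch: addr = 0x40021008, size = 0x2000 */
	phys = addr & PAGE_MASK;		/* 0x40021000 */
	size = PAGE_ALIGN(size + addr - phys);	/* PAGE_ALIGN(0x2008) = 0x3000 */
	/* maps [0x40021000, 0x40024000), covering the original range */
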
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index ec010cf2e816..6f874f99b910 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
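
The four hunks above change the same encoding: ipa_mem_array carried (start, count) with count derived from the table's byte size, while ipa_mem_bounds carries (start, end) where end is the highest index the modem may use, capped at IPA_ROUTE_MODEM_COUNT entries. Sketch of the conversion between the two forms (field layout illustrative):

	/* Sketch: "first N entries" expressed as an inclusive end index. */
	bounds.start = mem_offset + tbl_offset;		/* byte offset, unchanged */
	bounds.end = IPA_ROUTE_MODEM_COUNT - 1;		/* last index, inclusive */
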
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 6838e8065072..75d3fc0092e9 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 495e85abe50b..9651aa59b596 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes; however, it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..6f35438cda89 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -48,7 +48,7 @@ struct ipa;
*
* The offset of registers related to resource types is computed by a macro
* that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
+ * which is a member of the ipa_resource_type_src enumerated type for
* source endpoint resources or the ipa_resource_type_dst enumerated type
* for destination endpoint resources.
*
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2f5a58bfc529..69efe672ca52 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index b6a9a0d79d68..1538e2e1732f 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index dfeb5b392e64..bb1c298c1e78 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we don't care about
* multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -589,7 +590,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index ef02f2cf5ce1..cbabca167a07 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f1683ce6b561..c6d271e5687e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -162,6 +162,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+ struct macsec_rx_sa *sa = NULL;
+ int an;
+
+ for (an = 0; an < MACSEC_NUM_AN; an++) {
+ sa = macsec_rxsa_get(rx_sc->sa[an]);
+ if (sa)
+ break;
+ }
+ return sa;
+}
+
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -449,11 +462,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -500,18 +508,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+ struct macsec_dev *macsec = macsec_priv(skb->dev);
+ struct macsec_secy *secy = &macsec->secy;
+ bool sci_present = macsec_skb_cb(skb)->has_sci;
+
+ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
+
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+ unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
- txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
- txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -541,9 +559,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
- macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
- len = skb->len;
+ /* packet is encrypted/protected so tx_bytes must be calculated */
+ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+ macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -702,6 +721,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+ macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -743,15 +763,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_dropped++;
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
- rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
- rxsc_stats->stats.InOctetsValidated += skb->len;
+ rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -764,6 +786,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+ this_cpu_inc(rx_sa->stats->InPktsNotValid);
+ secy->netdev->stats.rx_errors++;
return false;
}
@@ -856,9 +880,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
- len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1049,6 +1073,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
continue;
}
@@ -1158,6 +1183,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+ secy->netdev->stats.rx_errors++;
goto drop_nosa;
}
@@ -1168,11 +1194,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_errors++;
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1182,6 +1212,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1202,6 +1234,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
goto drop;
}
}
@@ -1230,6 +1263,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1237,7 +1271,6 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
- len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1279,6 +1312,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_errors++;
continue;
}
@@ -3404,6 +3438,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
@@ -3414,7 +3449,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
- len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -3622,7 +3656,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
eth_hw_addr_set(dev, addr->sa_data);
- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -3662,6 +3695,7 @@ static void macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
+ s->rx_errors = dev->stats.rx_errors;
}
static int macsec_get_iflink(const struct net_device *dev)
@@ -3960,6 +3994,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 3e79c2c51929..1c1584fca632 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -47,7 +47,9 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
* just fall back to poll mode
*/
if (rc == -EPROBE_DEFER)
- rc = -ENODEV;
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
+ if (rc == -EPROBE_DEFER)
+ return rc;
if (rc > 0) {
phy->irq = rc;
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 9e3c815a070f..796e9c7857d0 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 605a38e16db0..0e58aa7f0374 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_fail_fops.fops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e470e3398abc..9a1a5b203624 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&ns->syncp);
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
}
static int
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 8b7a46db30e0..7111e2e958e9 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -91,6 +91,9 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -125,6 +128,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP 1000
+#define AQR107_OP_IN_PROG_TIMEOUT 100000
+
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -597,16 +606,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+ int val, err;
+
+ /* The datasheet notes to wait at least 1ms after issuing a
+ * processor intensive operation before checking.
+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+ * because that just determines the maximum time slept, not the minimum.
+ */
+ usleep_range(1000, 5000);
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_GEN_STAT2, val,
+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (err) {
+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1e38039c5c56..6939563d3b7c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -535,7 +535,7 @@ static int dp83867_of_init_io_impedance(struct phy_device *phydev)
cell = of_nvmem_cell_get(of_node, "io_impedance_ctrl");
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
- if (ret != -ENOENT)
+ if (ret != -ENOENT && ret != -EOPNOTSUPP)
return phydev_err_probe(phydev, ret,
"failed to get nvmem cell io_impedance_ctrl\n");
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 73f7962a37d3..c49062ad72c6 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
irq_status == INTSRC_ENERGY_DETECT)
return IRQ_HANDLED;
- /* Give PHY some time before MAC starts sending data. This works
- * around an issue where network doesn't come up properly.
- */
- if (!(irq_status & INTSRC_LINK_DOWN))
- phy_queue_state_machine(phydev, msecs_to_jiffies(100));
- else
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e78d0bf69bc3..38234d7e14c5 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2679,16 +2679,19 @@ static int lan8804_config_init(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
int irq_status, tsu_irq_status;
+ int ret = IRQ_NONE;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
- phy_trigger_machine(phydev);
-
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
while (1) {
tsu_irq_status = lanphy_read_page_reg(phydev, 4,
LAN8814_INTR_STS_REG);
@@ -2697,12 +2700,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
(tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
LAN8814_INTR_STS_REG_1588_TSU1_ |
LAN8814_INTR_STS_REG_1588_TSU2_ |
- LAN8814_INTR_STS_REG_1588_TSU3_)))
+ LAN8814_INTR_STS_REG_1588_TSU3_))) {
lan8814_handle_ptp_interrupt(phydev);
- else
+ ret = IRQ_HANDLED;
+ } else {
break;
+ }
}
- return IRQ_HANDLED;
+
+ return ret;
}
static int lan8814_ack_interrupt(struct phy_device *phydev)
@@ -2873,12 +2879,18 @@ static int lan8814_config_init(struct phy_device *phydev)
return 0;
}
+/* It is expected that there will not be any 'lan8814_take_coma_mode'
+ * function called in suspend, because the GPIO line can be shared: if one
+ * of the PHYs went back into coma mode, all the other PHYs sharing the
+ * line would go with it, which is wrong.
+ */
static int lan8814_release_coma_mode(struct phy_device *phydev)
{
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
+ GPIOD_OUT_HIGH_OPEN_DRAIN |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index d4c93d59bc53..8569a545e0a3 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -28,12 +28,16 @@
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08)
/* Interrupt Mask Register */
#define LAN87XX_INTERRUPT_MASK (0x19)
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+#define LAN87XX_INTERRUPT_MASK_2 (0x09)
+#define LAN87XX_MASK_COMM_RDY BIT(10)
+
/* MISC Control 1 Register */
#define LAN87XX_CTRL_1 (0x11)
#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
int rc, val = 0;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
+ /* clear all interrupts */
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (rc < 0)
+ return rc;
+
+ /* enable link down and comm ready interrupts */
+ val = LAN87XX_MASK_LINK_DOWN;
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ val = LAN87XX_MASK_COMM_RDY;
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
} else {
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
- if (rc)
+ if (rc < 0)
return rc;
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
}
return rc < 0 ? rc : 0;
@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
if (irq_status < 0) {
phy_error(phydev);
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 29b1df03f3e8..a87a4b3ffce4 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -190,44 +190,42 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
*/
static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
{
+ u16 adv_l_mask, adv_l = 0;
+ u16 adv_m_mask, adv_m = 0;
int changed = 0;
- u16 adv_l = 0;
- u16 adv_m = 0;
int ret;
+ adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
+ MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+ adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_FORCE:
adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS;
break;
case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
break;
case MASTER_SLAVE_CFG_UNKNOWN:
case MASTER_SLAVE_CFG_UNSUPPORTED:
- return 0;
+ /* if master/slave role is not specified, do not overwrite it */
+ adv_l_mask &= ~MDIO_AN_T1_ADV_L_FORCE_MS;
+ adv_m_mask &= ~MDIO_AN_T1_ADV_M_MST;
+ break;
default:
phydev_warn(phydev, "Unsupported Master/Slave mode\n");
return -EOPNOTSUPP;
}
- switch (phydev->master_slave_set) {
- case MASTER_SLAVE_CFG_MASTER_FORCE:
- case MASTER_SLAVE_CFG_MASTER_PREFERRED:
- adv_m |= MDIO_AN_T1_ADV_M_MST;
- break;
- case MASTER_SLAVE_CFG_SLAVE_FORCE:
- case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
- break;
- default:
- break;
- }
-
adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
- (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP
- | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l);
+ adv_l_mask, adv_l);
if (ret < 0)
return ret;
if (ret > 0)
@@ -236,7 +234,7 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M,
- MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m);
+ adv_m_mask, adv_m);
if (ret < 0)
return ret;
if (ret > 0)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index a74b320f5b27..12ff276b80ae 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,6 +316,12 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
+ /* If we managed to get here with the PHY state machine in a state neither
+ * PHY_HALTED nor PHY_READY, this is an indication that something went wrong
+ * and that we should most likely be using MAC managed PM, but we are not.
+ */
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+
ret = phy_init_hw(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index dafd3e9ebbf8..c8791e9b451d 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1111,7 +1111,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index c3d42062559d..9e75ed3f08ce 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -716,10 +716,20 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (!tap) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return total_len;
+ }
+ skb->dev = tap->dev;
+
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
if (err) {
+ rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
}
@@ -732,8 +742,6 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
- rcu_read_lock();
- tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_zcopy_init(skb, msg_control);
@@ -742,14 +750,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
uarg->callback(NULL, uarg, false);
}
- if (tap) {
- skb->dev = tap->dev;
- dev_queue_xmit(skb);
- } else {
- kfree_skb(skb);
- }
+ dev_queue_xmit(skb);
rcu_read_unlock();
-
return total_len;
err_kfree:
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index aac133a1e27a..154a3c0a6dfd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 0ad468a00064..aff39bf3161d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1680,7 +1680,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1693,7 +1693,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1706,7 +1706,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1719,7 +1719,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1732,7 +1732,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1745,7 +1745,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1758,7 +1758,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1771,7 +1771,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1784,7 +1784,7 @@ static const struct driver_info toshiba_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1797,7 +1797,7 @@ static const struct driver_info mct_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1810,7 +1810,7 @@ static const struct driver_info at_umc2000_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1823,7 +1823,7 @@ static const struct driver_info at_umc200_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1836,7 +1836,7 @@ static const struct driver_info at_umc2000sp_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2de09ad5bac0..e11f70911acc 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 571a399c195d..0cb187def5bc 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1087,6 +1087,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1390,6 +1391,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)}, /* Cinterion MV31 RmNet based on new baseline */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f3, 0)}, /* Cinterion MV32-W-A RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f4, 0)}, /* Cinterion MV32-W-B RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0f6efaabaa32..688905ea0a6d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -770,6 +770,7 @@ enum rtl8152_flags {
RX_EPROTO,
};
+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
@@ -5906,6 +5907,11 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -6431,21 +6437,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
- switch (tp->version) {
- case RTL_VER_10:
- case RTL_VER_11:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
- break;
- case RTL_VER_12:
- case RTL_VER_13:
- case RTL_VER_15:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
- break;
- default:
- break;
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
}
static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6557,6 +6550,11 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -9584,6 +9582,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
if (vendor_id == VENDOR_ID_LENOVO) {
switch (product_id) {
+ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
@@ -9831,6 +9830,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2cb833b3006a..466da01ba2e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,7 +312,6 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
- struct netdev_queue *queue = NULL;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -330,7 +329,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
- queue = netdev_get_tx_queue(dev, rxq);
/* The napi pointer is available when an XDP program is
* attached or when GRO is enabled
@@ -342,8 +340,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
- if (queue)
- txq_trans_cond_update(queue);
if (!use_napi)
dev_lstats_add(dev, length);
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ec8e1b3108c3..9cce7dec7366 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -267,6 +270,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -284,6 +293,9 @@ struct padded_vnet_hdr {
char padding[12];
};
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -1057,8 +1069,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
@@ -1196,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
if (!hdr_hash || !skb)
return;
- switch ((int)hdr_hash->hash_report) {
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1214,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
@@ -1625,6 +1640,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1872,6 +1892,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+ /* 1. wait for all xmit to complete
+ * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -2282,10 +2366,57 @@ static void virtnet_get_ringparam(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+}
+
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
@@ -2615,27 +2746,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if the VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight if the link is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2643,16 +2836,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2771,10 +2967,12 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -3168,6 +3366,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -3175,26 +3394,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -3441,6 +3648,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3577,6 +3786,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -3811,7 +4027,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
- VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 90811ab851fd..c3285242f74f 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2321,7 +2321,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = *saddr;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
+ fl6.flowlabel = ip6_make_flowinfo(tos, label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index d0f3b6d7f408..5c804bcabfe6 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+ struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) &&
- addr->sa_family == AF_INET) ||
- (len == sizeof(struct sockaddr_in6) &&
- addr->sa_family == AF_INET6)) {
- struct endpoint endpoint = { { { 0 } } };
-
- memcpy(&endpoint.addr, addr, len);
+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+ endpoint.addr4 = *(struct sockaddr_in *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index ba87d294604f..d4bb40a695ab 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -6,29 +6,28 @@
#ifdef DEBUG
#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
static const struct {
bool result;
- u64 nsec_to_sleep_before;
+ unsigned int msec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
static __init unsigned int maximum_jiffies_at_index(int index)
{
- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
- total_nsecs += expected_results[i].nsec_to_sleep_before;
- return nsecs_to_jiffies(total_nsecs);
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
- if (expected_results[i].nsec_to_sleep_before) {
- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
- }
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
++test;
#endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
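The timing table is now expressed in milliseconds and converted to a jiffies budget once per step. A small self-contained model of that budget arithmetic, with illustrative constants standing in for the selftest's (HZ and the sleep table are assumptions):

#include <stdio.h>

#define MSEC_PER_SEC		1000u
#define PACKETS_PER_SECOND	20u
#define HZ			250u	/* illustrative tick rate */

/* sleep (ms) performed before each step; zeros are burst packets */
static const unsigned int msec_before[] = { 0, 0, 0, 50, 0, 100 };

static unsigned int msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;	/* round up */
}

/* Upper bound on elapsed jiffies at step `index`: a fixed 2/3-period
 * slack plus every sleep performed so far, converted once at the end. */
static unsigned int maximum_jiffies_at_index(int index)
{
	unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
	int i;

	for (i = 0; i <= index; ++i)
		total_msecs += msec_before[i];
	return msecs_to_jiffies(total_msecs);
}

int main(void)
{
	printf("%u\n", maximum_jiffies_at_index(5));	/* (33+150)ms -> 46 */
	return 0;
}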
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 4714c86bb501..64e7a767d963 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -52,15 +52,12 @@ DECLARE_EVENT_CLASS(ath10k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@@ -92,16 +89,13 @@ TRACE_EVENT(ath10k_log_dbg,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index a02e54735e88..76560587bea0 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -126,15 +126,12 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
- __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH11K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH11K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index a3d3740419eb..231a94769ddb 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -253,13 +253,10 @@ DECLARE_EVENT_CLASS(ath6kl_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -284,14 +281,11 @@ TRACE_EVENT(ath6kl_log_dbg,
TP_ARGS(level, vaf),
TP_STRUCT__entry(
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
index ba711644d27e..9935cf475b6d 100644
--- a/drivers/net/wireless/ath/trace.h
+++ b/drivers/net/wireless/ath/trace.h
@@ -40,16 +40,13 @@ TRACE_EVENT(ath_log,
TP_STRUCT__entry(
__string(device, wiphy_name(wiphy))
__string(driver, KBUILD_MODNAME)
- __dynamic_array(char, msg, ATH_DBG_MAX_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, wiphy_name(wiphy));
__assign_str(driver, KBUILD_MODNAME);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH_DBG_MAX_LEN,
- vaf->fmt,
- *vaf->va) >= ATH_DBG_MAX_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 11c989e95880..201f44612c31 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -70,13 +70,10 @@ DECLARE_EVENT_CLASS(wil6210_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- WIL6210_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= WIL6210_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 338c66d0c5f8..5a139d7ed47a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -33,13 +33,11 @@ TRACE_EVENT(brcmf_err,
TP_ARGS(func, vaf),
TP_STRUCT__entry(
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
@@ -50,14 +48,12 @@ TRACE_EVENT(brcmf_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
index 0e8a69ab909f..488456420353 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -28,12 +28,10 @@ DECLARE_EVENT_CLASS(brcms_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -64,14 +62,12 @@ TRACE_EVENT(brcms_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index c62f299b9e0a..d8a5dbf89a02 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
cpu_to_le32(new_rate);
repeat_rate--;
idx++;
- if (idx >= LINK_QUAL_MAX_RETRY_NUM)
- goto out;
}
il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
repeat_rate--;
}
-out:
lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index a647a406b87b..b20409f8c13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -140,6 +140,7 @@ config IWLMEI
depends on INTEL_MEI
depends on PM
depends on CFG80211
+ depends on BROKEN
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 7dd70011fd1e..1d6c292cf545 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -18,12 +18,10 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -55,14 +53,12 @@ TRACE_EVENT(iwlwifi_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5eb28f8ee87e..11536f115198 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1833,8 +1833,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
+ IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e55f153ff26..1f301a5fb396 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -5060,6 +5060,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
nlh = nlmsg_hdr(skb);
gnlh = nlmsg_data(nlh);
+
+ if (skb->len < nlh->nlmsg_len)
+ return -EINVAL;
+
err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
hwsim_genl_policy, NULL);
if (err) {
@@ -5102,7 +5106,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
skb->data = skb->head;
- skb_set_tail_pointer(skb, len);
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, len);
hwsim_virtio_handle_cmd(skb);
spin_lock_irqsave(&hwsim_virtio_lock, flags);
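The fix is twofold: reject commands whose netlink header claims more bytes than the skb actually holds, and rebuild the tail and length from the head rather than assuming the old tail is still valid after data moved. A toy buffer illustrating the reset-then-put accounting (this is not the real skb API):

#include <assert.h>
#include <stdio.h>

struct buf {
	unsigned char data[256];
	unsigned int head, tail, len;	/* offsets into data[] */
};

static void buf_reset_tail(struct buf *b)
{
	b->tail = b->head;	/* tail follows head, len starts over */
	b->len = 0;
}

static unsigned char *buf_put(struct buf *b, unsigned int n)
{
	unsigned char *p = &b->data[b->tail];

	assert(b->tail + n <= sizeof(b->data));	/* bounds-checked append */
	b->tail += n;
	b->len += n;
	return p;
}

int main(void)
{
	struct buf b = { .head = 16, .tail = 64, .len = 48 };

	b.head = 0;		/* payload was moved to the buffer start */
	buf_reset_tail(&b);	/* resynchronize tail/len with the new head */
	buf_put(&b, 100);	/* then account for the received bytes */
	printf("len=%u tail=%u\n", b.len, b.tail);
	return 0;
}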
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 253cbc1956d1..6de13d641438 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -267,7 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_highest |=
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ad6c7d632eed..d6aae60c440d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1088,7 +1088,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index e1800674089a..576a0149251b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -261,7 +261,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
err = mt7921e_driver_own(dev);
if (err)
- return err;
+ goto out;
err = mt7921_run_firmware(dev);
if (err)
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index b89519ab9205..eb1d1ba3a443 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -635,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
conn_info->req_ies_len = 0;
}
-inline void wilc_handle_disconnect(struct wilc_vif *vif)
+void wilc_handle_disconnect(struct wilc_vif *vif)
{
struct host_if_drv *hif_drv = vif->hif_drv;
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 69ba1d469e9f..baa2881f4465 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -215,5 +215,6 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto);
int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
-inline void wilc_handle_disconnect(struct wilc_vif *vif);
+void wilc_handle_disconnect(struct wilc_vif *vif);
+
#endif
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 43c085c74b7a..bb1a315a7b7e 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -245,6 +245,7 @@ struct wilc {
u8 *rx_buffer;
u32 rx_buffer_offset;
u8 *tx_buffer;
+ u32 *vmm_table;
struct txq_handle txq[NQUEUES];
int txq_entries;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 600cc57e9da2..7390f94cd4ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -28,6 +28,7 @@ struct wilc_sdio {
u32 block_size;
bool isinit;
int has_thrpt_enh3;
+ u8 *cmd53_buf;
};
struct sdio_cmd52 {
@@ -47,6 +48,7 @@ struct sdio_cmd53 {
u32 count: 9;
u8 *buffer;
u32 block_size;
+ bool use_global_buf;
};
static const struct wilc_hif_func wilc_hif_sdio;
@@ -91,6 +93,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+ u8 *buf = cmd->buffer;
sdio_claim_host(func);
@@ -101,12 +105,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
else
size = cmd->count;
+ if (cmd->use_global_buf) {
+ if (size > sizeof(u32))
+ return -EINVAL;
+
+ buf = sdio_priv->cmd53_buf;
+ }
+
if (cmd->read_write) { /* write */
- ret = sdio_memcpy_toio(func, cmd->address,
- (void *)cmd->buffer, size);
+ if (cmd->use_global_buf)
+ memcpy(buf, cmd->buffer, size);
+
+ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
} else { /* read */
- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
- cmd->address, size);
+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+ if (cmd->use_global_buf)
+ memcpy(cmd->buffer, buf, size);
}
sdio_release_host(func);
@@ -128,6 +143,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
if (!sdio_priv)
return -ENOMEM;
+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdio_priv->cmd53_buf) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret)
@@ -161,6 +182,7 @@ dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
free:
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
return ret;
}
@@ -172,6 +194,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
@@ -375,8 +398,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)&data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret)
@@ -414,6 +438,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -492,8 +517,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -535,6 +561,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
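Register-sized CMD53 transfers now go through a preallocated, DMA-safe bounce buffer instead of the caller's (possibly on-stack) buffer. A minimal userspace model of the copy-in/copy-out wrapper (the transfer function is a stub and all names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the bus transfer; real code would DMA to/from `buf`. */
static int bus_xfer(int write, uint8_t *buf, size_t size)
{
	(void)write; (void)buf; (void)size;
	return 0;
}

struct ctx {
	uint8_t bounce[sizeof(uint32_t)];	/* allocated once at probe */
};

static int xfer_small(struct ctx *c, int write, uint8_t *user, size_t size)
{
	int ret;

	if (size > sizeof(c->bounce))
		return -1;			/* register-sized only */
	if (write)
		memcpy(c->bounce, user, size);	/* copy in before a write */
	ret = bus_xfer(write, c->bounce, size);
	if (!write && !ret)
		memcpy(user, c->bounce, size);	/* copy out after a read */
	return ret;
}

int main(void)
{
	struct ctx c;
	uint32_t reg = 0xdeadbeef;

	printf("%d\n", xfer_small(&c, 1, (uint8_t *)&reg, sizeof(reg)));
	return 0;
}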
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 947d9a0a494e..58bbf50081e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
int ret = 0;
int counter;
int timeout;
- u32 vmm_table[WILC_VMM_TBL_SIZE];
+ u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
@@ -1252,6 +1252,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
@@ -1489,6 +1491,14 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}
+ if (!wilc->vmm_table)
+ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
+ goto fail;
+ }
+
if (!wilc->tx_buffer)
wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
@@ -1513,7 +1523,8 @@ int wilc_wlan_init(struct net_device *dev)
return 0;
fail:
-
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
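The VMM table moves from a large on-stack array to a heap buffer that is allocated once and reused across re-initializations; both the failure path and cleanup free it and NULL the pointer so a later init starts clean. A compact sketch of that allocate-once, idempotent-teardown pattern (sizes and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct wlan {
	unsigned int *vmm_table;
	unsigned char *tx_buffer;
};

/* Repeated init reuses surviving buffers; the failure path frees and
 * NULLs everything so a retry starts from a known-empty state. */
static int wlan_init(struct wlan *w)
{
	if (!w->vmm_table &&
	    !(w->vmm_table = calloc(64, sizeof(*w->vmm_table))))
		goto fail;
	if (!w->tx_buffer && !(w->tx_buffer = malloc(4096)))
		goto fail;
	return 0;
fail:
	free(w->vmm_table);
	w->vmm_table = NULL;
	free(w->tx_buffer);
	w->tx_buffer = NULL;
	return -1;
}

int main(void)
{
	struct wlan w = { 0 };

	printf("%d\n", wlan_init(&w));
	free(w.vmm_table);
	free(w.tx_buffer);
	return 0;
}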
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 990360d75cb6..e85b3c5d4acc 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2caf997f9bc9..07596bf5f7d6 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
pn53x_unregister_nfc(pn532->priv);
serdev_device_close(serdev);
pn53x_common_clean(pn532->priv);
+ del_timer_sync(&pn532->cmd_timeout);
kfree_skb(pn532->recv_skb);
kfree(pn532);
}
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index b019755e4e21..3ece49cb18ff 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -45,7 +45,6 @@
#define NTB_EPF_MIN_DB_COUNT 3
#define NTB_EPF_MAX_DB_COUNT 31
-#define NTB_EPF_MW_OFFSET 2
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
@@ -67,6 +66,7 @@ struct ntb_epf_dev {
enum pci_barno ctrl_reg_bar;
enum pci_barno peer_spad_reg_bar;
enum pci_barno db_reg_bar;
+ enum pci_barno mw_bar;
unsigned int mw_count;
unsigned int spad_count;
@@ -92,6 +92,8 @@ struct ntb_epf_data {
enum pci_barno peer_spad_reg_bar;
/* BAR that contains Doorbell region and Memory window '1' */
enum pci_barno db_reg_bar;
+ /* BAR that contains memory windows */
+ enum pci_barno mw_bar;
};
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
@@ -411,7 +413,7 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EINVAL;
}
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
mw_size = pci_resource_len(ntb->pdev, bar);
@@ -453,7 +455,7 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
@@ -565,6 +567,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
struct pci_dev *pdev)
{
struct device *dev = ndev->dev;
+ size_t spad_sz, spad_off;
int ret;
pci_set_drvdata(pdev, ndev);
@@ -599,10 +602,16 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
goto err_dma_mask;
}
- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
- if (!ndev->peer_spad_reg) {
- ret = -EIO;
- goto err_dma_mask;
+ if (ndev->peer_spad_reg_bar) {
+ ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (!ndev->peer_spad_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+ } else {
+ spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+ spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
@@ -657,6 +666,7 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
enum pci_barno peer_spad_reg_bar = BAR_1;
enum pci_barno ctrl_reg_bar = BAR_0;
enum pci_barno db_reg_bar = BAR_2;
+ enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
@@ -671,17 +681,16 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
data = (struct ntb_epf_data *)id->driver_data;
if (data) {
- if (data->peer_spad_reg_bar)
- peer_spad_reg_bar = data->peer_spad_reg_bar;
- if (data->ctrl_reg_bar)
- ctrl_reg_bar = data->ctrl_reg_bar;
- if (data->db_reg_bar)
- db_reg_bar = data->db_reg_bar;
+ peer_spad_reg_bar = data->peer_spad_reg_bar;
+ ctrl_reg_bar = data->ctrl_reg_bar;
+ db_reg_bar = data->db_reg_bar;
+ mw_bar = data->mw_bar;
}
ndev->peer_spad_reg_bar = peer_spad_reg_bar;
ndev->ctrl_reg_bar = ctrl_reg_bar;
ndev->db_reg_bar = db_reg_bar;
+ ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
@@ -729,6 +738,14 @@ static const struct ntb_epf_data j721e_data = {
.ctrl_reg_bar = BAR_0,
.peer_spad_reg_bar = BAR_1,
.db_reg_bar = BAR_2,
+ .mw_bar = BAR_2,
+};
+
+static const struct ntb_epf_data mx8_data = {
+ .ctrl_reg_bar = BAR_0,
+ .peer_spad_reg_bar = BAR_0,
+ .db_reg_bar = BAR_2,
+ .mw_bar = BAR_4,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
@@ -737,6 +754,11 @@ static const struct pci_device_id ntb_epf_pci_tbl[] = {
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)&mx8_data,
+ },
{ },
};
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 733557231ed0..0ed6f809ff2e 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2406,7 +2406,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
idt_get_mw_name(data), ndev->mws[idx].bar);
@@ -2435,7 +2435,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off,
"%s BAR%hhu, ", idt_get_mw_name(data),
@@ -2480,7 +2480,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
int src;
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
- "\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
+ "\t%hhu. 0x%08x from peer %d (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
}
off += scnprintf(strbuf + off, size - off, "\n");
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index e5f14e20a9ff..84772013812b 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -763,7 +763,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
- else if (pdev_is_gen4(ndev->ntb.pdev))
+ else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
@@ -1874,7 +1874,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
- } else if (pdev_is_gen4(pdev)) {
+ } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
ndev->ntb.ops = &intel_ntb4_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
@@ -1904,7 +1904,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -1920,7 +1921,8 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2047,6 +2049,8 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
/* GEN4 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
+ /* GEN5 PCIe */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index 4081fc538ff4..22cac7975b3c 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -197,7 +197,7 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
if (pdev_is_ICX(pdev))
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
- else if (pdev_is_SPR(pdev))
+ else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index b233d1c6ba2d..da4d5fe55bab 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -70,6 +70,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_GNR 0x0db4
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
@@ -228,4 +229,10 @@ static inline int pdev_is_gen4(struct pci_dev *pdev)
return 0;
}
+
+static inline int pdev_is_gen5(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_GNR;
+}
+
#endif
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index b7bf3f863d79..5ee0afa621a9 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
u64 bits;
int n;
+ if (*offp)
+ return 0;
+
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
- if (ret < 0) {
+ if (copy_from_user(buf, ubuf, size)) {
kfree(buf);
- return ret;
+ return -EFAULT;
}
buf[size] = 0;
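With the offset check up front, a retried write (nonzero *offp) is reported as fully consumed instead of being re-parsed through simple_write_to_buffer's partial-copy semantics. A userspace model of the handler's control flow (the offset bookkeeping at the end is illustrative, not taken from the patch):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* A nonzero file offset means a continued write: treat it as EOF
 * rather than re-parsing. A fresh write is copied wholesale. */
static ssize_t tool_fn_write(const char *ubuf, size_t size, long long *offp)
{
	char *buf;

	if (*offp)
		return 0;		/* retried write: nothing to consume */

	buf = malloc(size + 1);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, ubuf, size);	/* copy_from_user() stand-in */
	buf[size] = '\0';
	printf("parsed: %s\n", buf);
	free(buf);
	*offp += size;			/* illustrative offset bookkeeping */
	return size;
}

int main(void)
{
	long long off = 0;

	tool_fn_write("0x3", 3, &off);
	printf("retry -> %zd\n", tool_fn_write("0x3", 3, &off));	/* 0 */
	return 0;
}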
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f36efcc11f67..7e88cd242380 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -453,6 +453,21 @@ static void pmem_release_disk(void *__pmem)
put_disk(pmem->disk);
}
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+ unsigned long pfn, unsigned long nr_pages, int mf_flags)
+{
+ struct pmem_device *pmem =
+ container_of(pgmap, struct pmem_device, pgmap);
+ u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+ u64 len = nr_pages << PAGE_SHIFT;
+
+ return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+ .memory_failure = pmem_pagemap_memory_failure,
+};
+
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
@@ -514,6 +529,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -527,6 +543,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
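The new callback's only real work is address translation: the failing pfn range is converted to a byte offset relative to the device's data area before the dax holder is notified. The arithmetic, isolated as a small sketch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Translate a failing pfn range into a byte range relative to the
 * start of the device's data area (mirrors the callback's math:
 * offset = PFN_PHYS(pfn) - phys_addr - data_offset). */
static void pfn_range_to_dev_range(uint64_t pfn, uint64_t nr_pages,
				   uint64_t phys_addr, uint64_t data_offset,
				   uint64_t *offset, uint64_t *len)
{
	*offset = (pfn << PAGE_SHIFT) - phys_addr - data_offset;
	*len = nr_pages << PAGE_SHIFT;
}

int main(void)
{
	uint64_t off, len;

	pfn_range_to_dev_range(0x100000, 4, 0xF0000000ULL, 0x200000,
			       &off, &len);
	printf("offset=%#llx len=%#llx\n",
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}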
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index d976260eca7a..473a71bbd9c9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -133,7 +133,8 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- memregion_free(nd_region->id);
+ if (!test_bit(ND_REGION_CXL, &nd_region->flags))
+ memregion_free(nd_region->id);
kfree(nd_region);
}
@@ -982,9 +983,14 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!nd_region)
return NULL;
- nd_region->id = memregion_alloc(GFP_KERNEL);
- if (nd_region->id < 0)
- goto err_id;
+ /* CXL pre-assigns memregion ids before creating nvdimm regions */
+ if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
+ nd_region->id = ndr_desc->memregion;
+ } else {
+ nd_region->id = memregion_alloc(GFP_KERNEL);
+ if (nd_region->id < 0)
+ goto err_id;
+ }
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
if (!nd_region->lane)
@@ -1043,9 +1049,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
- err_percpu:
- memregion_free(nd_region->id);
- err_id:
+err_percpu:
+ if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
+ memregion_free(nd_region->id);
+err_id:
kfree(nd_region);
return NULL;
}
@@ -1068,6 +1075,13 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+void nvdimm_region_delete(struct nd_region *nd_region)
+{
+ if (nd_region)
+ nd_device_unregister(&nd_region->dev, ND_SYNC);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_delete);
+
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 995b6cdc67ed..20da455d2ef6 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -81,17 +81,24 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
ndr_desc.res = &res;
ndr_desc.numa_node = nid;
ndr_desc.flush = async_pmem_flush;
+ ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ /*
+ * The NVDIMM region could become available before
+ * virtio_device_ready() is called by virtio_dev_probe(),
+ * so set the device ready here.
+ */
+ virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
- nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
out_nd:
+ virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 70ebf27ad10e..66446f1e06cf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4198,7 +4198,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bdc0ff7ed9ab..1bdf714dcd9e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -504,7 +504,6 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
-#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -514,6 +513,7 @@ struct nvme_ctrl_ops {
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
void (*print_device_info)(struct nvme_ctrl *ctrl);
+ bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ca1560240123..98864b853eef 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -230,11 +230,10 @@ struct nvme_iod {
bool use_sgl;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
- int nents; /* Used in scatterlist */
dma_addr_t first_dma;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t meta_dma;
- struct scatterlist *sg;
+ struct sg_table sgt;
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
}
}
-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
return;
}
- WARN_ON_ONCE(!iod->nents);
+ WARN_ON_ONCE(!iod->sgt.nents);
+
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- nvme_unmap_sg(dev, req);
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
iod->first_dma);
@@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
nvme_free_sgls(dev, req);
else
nvme_free_prps(dev, req);
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -702,16 +691,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_len = sg_dma_len(sg);
}
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
nvme_free_prps(dev, req);
return BLK_STS_RESOURCE;
bad_sgl:
- WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
"Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->nents);
+ blk_rq_payload_bytes(req), iod->sgt.nents);
return BLK_STS_IOERR;
}
@@ -737,12 +726,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd, int entries)
+ struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
+ unsigned int entries = iod->sgt.nents;
dma_addr_t sgl_dma;
int i = 0;
@@ -840,7 +830,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
- int nr_mapped;
+ int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
struct bio_vec bv = req_bvec(req);
@@ -858,26 +848,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
iod->dma_len = 0;
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
+ iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
- if (!iod->nents)
+ sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+ iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ if (!iod->sgt.orig_nents)
goto out_free_sg;
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
- iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
- else
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req), DMA_ATTR_NO_WARN);
- if (!nr_mapped)
+ rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+ DMA_ATTR_NO_WARN);
+ if (rc) {
+ if (rc == -EREMOTEIO)
+ ret = BLK_STS_TARGET;
goto out_free_sg;
+ }
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
if (ret != BLK_STS_OK)
@@ -885,9 +874,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return BLK_STS_OK;
out_unmap_sg:
- nvme_unmap_sg(dev, req);
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
return ret;
}
@@ -911,7 +900,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
iod->aborted = 0;
iod->npages = -1;
- iod->nents = 0;
+ iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -2992,7 +2981,6 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
-
static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
@@ -3007,11 +2995,17 @@ static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
subsys->firmware_rev);
}
+static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ return dma_pci_p2pdma_supported(dev->dev);
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED |
- NVME_F_PCI_P2PDMA,
+ .flags = NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -3019,6 +3013,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
+ .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
};
static int nvme_dev_map(struct nvme_dev *dev)
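Switching from a bare scatterlist to struct sg_table lets dma_map_sgtable() keep orig_nents and nents consistent and handle P2PDMA pages internally; the driver's remaining job is translating -EREMOTEIO into a distinct block status. A sketch of just that bookkeeping and error-translation logic (the mapping itself is stubbed):

#include <errno.h>
#include <stdio.h>

struct sg_table_model {
	int orig_nents;	/* entries built from the request */
	int nents;	/* entries after DMA-mapping coalescing */
};

/* Stand-in for dma_map_sgtable(): may coalesce entries, or fail with
 * -EREMOTEIO when P2PDMA between the two devices is impossible. */
static int map_sgtable(struct sg_table_model *sgt, int p2p_ok)
{
	if (!p2p_ok)
		return -EREMOTEIO;
	sgt->nents = sgt->orig_nents;	/* pretend nothing coalesced */
	return 0;
}

int main(void)
{
	struct sg_table_model sgt = { .orig_nents = 8 };
	int rc = map_sgtable(&sgt, 0);

	/* the driver maps -EREMOTEIO to a distinct "target" status */
	printf("%s\n", rc == -EREMOTEIO ? "BLK_STS_TARGET" : "BLK_STS_OK");
	return 0;
}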
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 09fdcac87d17..4597bca43a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -415,7 +415,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- if (!ib_uses_virt_dma(ndev->device))
+ if (ib_dma_pci_p2p_dma_supported(ndev->device))
r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 94f017d808c4..96f0a12e507c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1045,26 +1045,29 @@ phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
*
* It returns true if "dma-coherent" property was found
* for this device in the DT, or if DMA is coherent by
- * default for OF devices on the current platform.
+ * default for OF devices on the current platform and no
+ * "dma-noncoherent" property was found for this device.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node;
-
- if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
- return true;
+ bool is_coherent = IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT);
node = of_node_get(np);
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
- of_node_put(node);
- return true;
+ is_coherent = true;
+ break;
+ }
+ if (of_property_read_bool(node, "dma-noncoherent")) {
+ is_coherent = false;
+ break;
}
node = of_get_next_dma_parent(node);
}
of_node_put(node);
- return false;
+ return is_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
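Coherence is now resolved by walking up the DMA parent chain: the first explicit property wins, and the platform default applies only when the walk finds neither. A self-contained model over a toy node tree:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	const struct node *dma_parent;
	bool has_coherent;	/* "dma-coherent" present */
	bool has_noncoherent;	/* "dma-noncoherent" present */
};

/* First explicit property on the path to the root wins; otherwise
 * fall back to the platform default (models of_dma_is_coherent()). */
static bool dma_is_coherent(const struct node *np, bool platform_default)
{
	for (; np; np = np->dma_parent) {
		if (np->has_coherent)
			return true;
		if (np->has_noncoherent)
			return false;
	}
	return platform_default;
}

int main(void)
{
	struct node root = { 0 };
	struct node bus = { .dma_parent = &root, .has_coherent = true };
	struct node dev = { .dma_parent = &bus, .has_noncoherent = true };

	/* the device's "dma-noncoherent" overrides the bus's "dma-coherent" */
	printf("%d\n", dma_is_coherent(&dev, true));	/* 0 */
	return 0;
}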
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index e02a30c92719..1c573e7a60bc 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
@@ -529,7 +529,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
if (!nomap)
- kmemleak_alloc_phys(base, size, 0, 0);
+ kmemleak_alloc_phys(base, size, 0);
}
else
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
@@ -1024,6 +1024,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
int l;
const struct earlycon_id *match;
const void *fdt = initial_boot_params;
+ int ret;
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0)
@@ -1056,7 +1057,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- if (of_setup_earlycon(match, offset, options) == 0)
+ ret = of_setup_earlycon(match, offset, options);
+ if (!ret || ret == -EALREADY)
return 0;
}
return -ENODEV;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 84063eaebb91..77d1ba3a4154 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -13,11 +13,12 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
#include "opp.h"
@@ -36,6 +37,9 @@ DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;
+/* OPP ID allocator */
+static DEFINE_XARRAY_ALLOC1(opp_configs);
+
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
@@ -93,6 +97,18 @@ struct opp_table *_find_opp_table(struct device *dev)
return opp_table;
}
+/*
+ * Returns true if there is at most one clock, else returns false with a WARN.
+ *
+ * We don't force clk_count == 1 here as there are users who don't have a clock
+ * representation in the OPP table and manage the clock configuration themselves
+ * in a platform-specific way.
+ */
+static bool assert_single_clk(struct opp_table *opp_table)
+{
+ return !WARN_ON(opp_table->clk_count > 1);
+}
+
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@@ -114,6 +130,31 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
+ * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
+ * @opp: opp for which the supply information is to be returned
+ * @supplies: Placeholder for copying the supply information.
+ *
+ * Return: negative error number on failure, 0 otherwise on success after
+ * setting @supplies.
+ *
+ * This can be used for devices with any number of power supplies. The caller
+ * must ensure that the @supplies array contains space for each regulator.
+ */
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
+ struct dev_pm_opp_supply *supplies)
+{
+ if (IS_ERR_OR_NULL(opp) || !supplies) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(supplies, opp->supplies,
+ sizeof(*supplies) * opp->opp_table->regulator_count);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
+
+/**
* dev_pm_opp_get_power() - Gets the power corresponding to an opp
* @opp: opp for which power has to be returned for
*
@@ -152,7 +193,10 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
- return opp->rate;
+ if (!assert_single_clk(opp->opp_table))
+ return 0;
+
+ return opp->rates[0];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
@@ -398,6 +442,154 @@ int dev_pm_opp_get_opp_count(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
+/* Helpers to read keys */
+static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
+{
+ return opp->rates[0];
+}
+
+static unsigned long _read_level(struct dev_pm_opp *opp, int index)
+{
+ return opp->level;
+}
+
+static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
+{
+ return opp->bandwidth[index].peak;
+}
+
+/* Generic comparison helpers */
+static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key == key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key >= key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key > key)
+ return true;
+
+ *opp = temp_opp;
+ return false;
+}
+
+/* Generic key finding helpers */
+static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ /* Assert that the requirement is met */
+ if (assert && !assert(opp_table))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ if (compare(&opp, temp_opp, read(temp_opp, index), *key))
+ break;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp)) {
+ *key = read(opp, index);
+ dev_pm_opp_get(opp);
+ }
+
+ mutex_unlock(&opp_table->lock);
+
+ return opp;
+}
+
+static struct dev_pm_opp *
+_find_key(struct device *dev, unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ opp = _opp_table_find_key(opp_table, key, index, available, read,
+ compare, assert);
+
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+
+static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ unsigned long key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ /*
+ * The value of key will be updated here, but will be ignored as the
+ * caller doesn't need it.
+ */
+ return _find_key(dev, &key, index, available, read, _compare_exact,
+ assert);
+}
+
+static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _opp_table_find_key(opp_table, key, index, available, read,
+ _compare_ceil, assert);
+}
+
+static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_ceil,
+ assert);
+}
+
+static struct dev_pm_opp *_find_key_floor(struct device *dev,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_floor,
+ assert);
+}
+
/**
* dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
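The read()/compare() pair introduced above lets one list walker implement exact, ceil, and floor lookups, with only the comparison policy differing. A self-contained demonstration of the three policies over a sorted array, mirroring _compare_exact/_compare_ceil/_compare_floor (the data is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* One walker, three policies: exact, ceil (first >= key),
 * floor (last <= key) -- the shape of _opp_table_find_key(). */
typedef bool (*compare_fn)(long *found, long cur, long key);

static bool cmp_exact(long *found, long cur, long key)
{
	if (cur == key) { *found = cur; return true; }
	return false;
}

static bool cmp_ceil(long *found, long cur, long key)
{
	if (cur >= key) { *found = cur; return true; }
	return false;
}

static bool cmp_floor(long *found, long cur, long key)
{
	if (cur > key)
		return true;	/* passed the key: keep the previous match */
	*found = cur;
	return false;
}

static long find_key(const long *vals, int n, long key, compare_fn cmp)
{
	long found = -1;	/* -ERANGE stand-in */
	int i;

	for (i = 0; i < n; i++)
		if (cmp(&found, vals[i], key))
			break;
	return found;
}

int main(void)
{
	const long freqs[] = { 100, 200, 400, 800 };	/* ascending list */

	printf("ceil(300)=%ld floor(300)=%ld exact(400)=%ld\n",
	       find_key(freqs, 4, 300, cmp_ceil),	/* 400 */
	       find_key(freqs, 4, 300, cmp_floor),	/* 200 */
	       find_key(freqs, 4, 400, cmp_exact));	/* 400 */
	return 0;
}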
@@ -422,61 +614,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq,
- bool available)
+ unsigned long freq, bool available)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available == available &&
- temp_opp->rate == freq) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, freq, 0, available, _read_freq,
+ assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- return opp;
+ return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
+ assert_single_clk);
}
/**
@@ -500,23 +649,7 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- opp = _find_freq_ceil(opp_table, freq);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -541,98 +674,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- /* go to the next node, before choosing prev */
- if (temp_opp->rate > *freq)
- break;
- else
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *freq = opp->rate;
-
- return opp;
+ return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
- * target voltage.
- * @dev: Device for which we do this operation.
- * @u_volt: Target voltage.
- *
- * Search for OPP with highest (ceil) frequency and has voltage <= u_volt.
- *
- * Return: matching *opp, else returns ERR_PTR in case of error which should be
- * handled using IS_ERR.
- *
- * Error return values can be:
- * EINVAL: bad parameters
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !u_volt) {
- dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
- u_volt);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- if (temp_opp->supplies[0].u_volt > u_volt)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
-
-/**
* dev_pm_opp_find_level_exact() - search for an exact level
* @dev: device for which we do this operation
* @level: level to search for
@@ -650,33 +696,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->level == level) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
@@ -698,33 +718,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->level >= *level) {
- opp = temp_opp;
- *level = opp->level;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *level;
+ struct dev_pm_opp *opp;
+ opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
+ *level = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
@@ -732,7 +730,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
/**
* dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
 * Search for the matching ceil *available* OPP from a starting bandwidth
@@ -748,42 +746,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
* The callers are required to call dev_pm_opp_put() for the returned OPP after
* use.
*/
-struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
- unsigned int *bw, int index)
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+ int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- if (temp_opp->bandwidth[index].peak >= *bw) {
- opp = temp_opp;
- *bw = opp->bandwidth[index].peak;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+ opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
@@ -791,7 +761,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
/**
* dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching floor *available* OPP from a starting bandwidth
@@ -810,41 +780,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- /* go to the next node, before choosing prev */
- if (temp_opp->bandwidth[index].peak > *bw)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *bw = opp->bandwidth[index].peak;
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+ opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
@@ -874,80 +814,97 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return ret;
}
-static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long freq)
+static int
+_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down)
{
+ unsigned long *target = data;
+ unsigned long freq;
int ret;
- /* We may reach here for devices which don't change frequency */
- if (IS_ERR(clk))
- return 0;
+ /* One of target and opp must be available */
+ if (target) {
+ freq = *target;
+ } else if (opp) {
+ freq = opp->rates[0];
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
- ret = clk_set_rate(clk, freq);
+ ret = clk_set_rate(opp_table->clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
+ } else {
+ opp_table->rate_clk_single = freq;
}
return ret;
}
-static int _generic_set_opp_regulator(struct opp_table *opp_table,
- struct device *dev,
- struct dev_pm_opp *opp,
- unsigned long freq,
- int scaling_down)
+/*
+ * Simple implementation for configuring multiple clocks. Configure clocks in
+ * the order in which they are present in the array while scaling up, and in
+ * the reverse order while scaling down.
+ */
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- struct regulator *reg = opp_table->regulators[0];
- struct dev_pm_opp *old_opp = opp_table->current_opp;
+ int ret, i;
+
+ if (scaling_down) {
+ for (i = opp_table->clk_count - 1; i >= 0; i--) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ for (i = 0; i < opp_table->clk_count; i++) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
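A driver with several clocks could plug this helper in through the config interface added later in this patch; a sketch, with the clock names made up:

	static const char * const clk_names[] = { "core", "mem", NULL };
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.config_clks = dev_pm_opp_config_clks_simple,
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;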
+
+static int _opp_config_regulator_single(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
+{
+ struct regulator *reg = regulators[0];
int ret;
/* This function only supports single regulator per device */
- if (WARN_ON(opp_table->regulator_count > 1)) {
+ if (WARN_ON(count > 1)) {
dev_err(dev, "multiple regulators are not supported\n");
return -EINVAL;
}
- /* Scaling up? Scale voltage before frequency */
- if (!scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ ret = _set_opp_voltage(dev, reg, new_opp->supplies);
if (ret)
- goto restore_voltage;
-
- /* Scaling down? Scale voltage after frequency */
- if (scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_freq;
- }
+ return ret;
/*
* Enable the regulator after setting its voltages, otherwise it breaks
* some boot-enabled regulators.
*/
- if (unlikely(!opp_table->enabled)) {
+ if (unlikely(!new_opp->opp_table->enabled)) {
ret = regulator_enable(reg);
if (ret < 0)
dev_warn(dev, "Failed to enable regulator: %d", ret);
}
return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_opp->rate);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- _set_opp_voltage(dev, reg, old_opp->supplies);
-
- return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
@@ -978,36 +935,6 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
-static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, struct dev_pm_opp *opp,
- unsigned long freq)
-{
- struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
- struct dev_pm_opp *old_opp = opp_table->current_opp;
- int size;
-
- /*
- * We support this only if dev_pm_opp_set_regulators() was called
- * earlier.
- */
- if (opp_table->sod_supplies) {
- size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
- memcpy(data->new_opp.supplies, opp->supplies, size);
- data->regulator_count = opp_table->regulator_count;
- } else {
- data->regulator_count = 0;
- }
-
- data->regulators = opp_table->regulators;
- data->clk = opp_table->clk;
- data->dev = dev;
- data->old_opp.rate = old_opp->rate;
- data->new_opp.rate = freq;
-
- return opp_table->set_opp(data);
-}
-
static int _set_required_opp(struct device *dev, struct device *pd_dev,
struct dev_pm_opp *opp, int i)
{
@@ -1019,7 +946,7 @@ static int _set_required_opp(struct device *dev, struct device *pd_dev,
ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
if (ret) {
- dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+ dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
dev_name(pd_dev), pstate, ret);
}
@@ -1138,7 +1065,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
}
static int _set_opp(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp, unsigned long freq)
+ struct dev_pm_opp *opp, void *clk_data, bool forced)
{
struct dev_pm_opp *old_opp;
int scaling_down, ret;
@@ -1153,18 +1080,17 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
old_opp = opp_table->current_opp;
/* Return early if nothing to do */
- if (old_opp == opp && opp_table->current_rate == freq &&
- opp_table->enabled) {
+ if (!forced && old_opp == opp && opp_table->enabled) {
dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
return 0;
}
dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
- __func__, opp_table->current_rate, freq, old_opp->level,
+ __func__, old_opp->rates[0], opp->rates[0], old_opp->level,
opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
opp->bandwidth ? opp->bandwidth[0].peak : 0);
- scaling_down = _opp_compare_key(old_opp, opp);
+ scaling_down = _opp_compare_key(opp_table, old_opp, opp);
if (scaling_down == -1)
scaling_down = 0;
@@ -1181,23 +1107,38 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
dev_err(dev, "Failed to set bw: %d\n", ret);
return ret;
}
- }
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, opp, freq);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
- scaling_down);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
}
- if (ret)
- return ret;
+ if (opp_table->config_clks) {
+ ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
+ if (ret)
+ return ret;
+ }
/* Scaling down? Configure required OPPs after frequency */
if (scaling_down) {
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = _set_opp_bw(opp_table, opp, dev);
if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret);
@@ -1217,7 +1158,6 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
/* Make sure current_opp doesn't get freed */
dev_pm_opp_get(opp);
opp_table->current_opp = opp;
- opp_table->current_rate = freq;
return ret;
}
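The net ordering implemented by _set_opp() after this rework can be summarized as:

	/*
	 * scaling up:   required OPPs -> bandwidth -> regulators -> clocks
	 * scaling down: clocks -> regulators -> bandwidth -> required OPPs
	 */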
@@ -1238,6 +1178,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
struct opp_table *opp_table;
unsigned long freq = 0, temp_freq;
struct dev_pm_opp *opp = NULL;
+ bool forced = false;
int ret;
opp_table = _find_opp_table(dev);
@@ -1255,7 +1196,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* equivalent to a clk_set_rate()
*/
if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ ret = opp_table->config_clks(dev, opp_table, NULL,
+ &target_freq, false);
goto put_opp_table;
}
@@ -1276,12 +1218,22 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, freq, ret);
goto put_opp_table;
}
+
+ /*
+ * An OPP entry specifies the highest frequency at which other
+ * properties of the OPP entry apply. Even if the new OPP is the
+ * same as the old one, we may still reach here for a different
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+ forced = opp_table->rate_clk_single != target_freq;
}
- ret = _set_opp(dev, opp_table, opp, freq);
+ ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
if (target_freq)
dev_pm_opp_put(opp);
+
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
return ret;
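A worked illustration of the forced case (rates made up): with OPPs at 200 MHz and 300 MHz only, two successive requests may resolve to the same 300 MHz OPP while still requiring a clk change:

	dev_pm_opp_set_rate(dev, 250000000);	/* 300 MHz OPP, clk programmed to 250 MHz */
	dev_pm_opp_set_rate(dev, 300000000);	/* same OPP, but "forced" moves the clk to 300 MHz */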
@@ -1309,7 +1261,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
return PTR_ERR(opp_table);
}
- ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
+ ret = _set_opp(dev, opp_table, opp, NULL, false);
dev_pm_opp_put_opp_table(opp_table);
return ret;
@@ -1366,6 +1318,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
INIT_LIST_HEAD(&opp_table->dev_list);
INIT_LIST_HEAD(&opp_table->lazy);
+ opp_table->clk = ERR_PTR(-ENODEV);
+
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1412,20 +1366,38 @@ static struct opp_table *_update_opp_table_clk(struct device *dev,
int ret;
/*
- * Return early if we don't need to get clk or we have already tried it
+ * Return early if we don't need to get clk or we have already done it
* earlier.
*/
- if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
+ opp_table->clks)
return opp_table;
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
ret = PTR_ERR_OR_ZERO(opp_table->clk);
- if (!ret)
+ if (!ret) {
+ opp_table->config_clks = _opp_config_clk_single;
+ opp_table->clk_count = 1;
return opp_table;
+ }
if (ret == -ENOENT) {
+ /*
+ * There are a few platforms which don't want the OPP core to
+ * manage the device's clock settings. In such cases neither does
+ * the platform provide the clks explicitly to us, nor does the DT
+ * contain a valid clk entry. The OPP nodes in DT may still
+ * contain "opp-hz" property though, which we need to parse and
+ * allow the platform to find an OPP based on freq later on.
+ *
+ * This is a simple solution to take care of such corner cases,
+ * i.e. make the clk_count 1, which lets us allocate space for
+ * frequency in opp->rates and also parse the entries in DT.
+ */
+ opp_table->clk_count = 1;
+
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
return opp_table;
}
@@ -1528,7 +1500,7 @@ static void _opp_table_kref_release(struct kref *kref)
_of_clear_opp_table(opp_table);
- /* Release clk */
+ /* Release automatically acquired single clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
@@ -1581,7 +1553,7 @@ static void _opp_kref_release(struct kref *kref)
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
- _of_opp_free_required_opps(opp_table, opp);
+ _of_clear_opp(opp_table, opp);
opp_debug_remove_one(opp);
kfree(opp);
}
@@ -1613,10 +1585,13 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
+ if (!assert_single_clk(opp_table))
+ goto put_table;
+
mutex_lock(&opp_table->lock);
list_for_each_entry(iter, &opp_table->opp_list, node) {
- if (iter->rate == freq) {
+ if (iter->rates[0] == freq) {
opp = iter;
break;
}
@@ -1634,6 +1609,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
__func__, freq);
}
+put_table:
/* Drop the reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
@@ -1720,26 +1696,31 @@ void dev_pm_opp_remove_all_dynamic(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
-struct dev_pm_opp *_opp_allocate(struct opp_table *table)
+struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- int supply_count, supply_size, icc_size;
+ int supply_count, supply_size, icc_size, clk_size;
/* Allocate space for at least one supply */
- supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_count = opp_table->regulator_count > 0 ?
+ opp_table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * supply_count;
- icc_size = sizeof(*opp->bandwidth) * table->path_count;
+ clk_size = sizeof(*opp->rates) * opp_table->clk_count;
+ icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
/* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
-
+ opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
if (!opp)
return NULL;
- /* Put the supplies at the end of the OPP structure as an empty array */
+ /* Put the supplies, clk rates and bw at the end of the OPP structure */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+
+ opp->rates = (unsigned long *)(opp->supplies + supply_count);
+
if (icc_size)
- opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
+ opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
+
INIT_LIST_HEAD(&opp->node);
return opp;
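The resulting single-allocation layout, for reference:

	/*
	 * | struct dev_pm_opp | supplies[supply_count] | rates[clk_count] | bandwidth[path_count] |
	 *   ^opp                ^opp->supplies           ^opp->rates        ^opp->bandwidth
	 */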
@@ -1770,15 +1751,57 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+static int _opp_compare_rate(struct opp_table *opp_table,
+ struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+ int i;
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ if (opp1->rates[i] != opp2->rates[i])
+ return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
+ }
+
+ /* Same rates for both OPPs */
+ return 0;
+}
+
+static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
{
- if (opp1->rate != opp2->rate)
- return opp1->rate < opp2->rate ? -1 : 1;
- if (opp1->bandwidth && opp2->bandwidth &&
- opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
- return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+ int i;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
+ return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
+ }
+
+ /* Same bw for both OPPs */
+ return 0;
+}
+
+/*
+ * Returns
+ * 0: opp1 == opp2
+ * 1: opp1 > opp2
+ * -1: opp1 < opp2
+ */
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
+{
+ int ret;
+
+ ret = _opp_compare_rate(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
+ ret = _opp_compare_bw(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
if (opp1->level != opp2->level)
return opp1->level < opp2->level ? -1 : 1;
+
+ /* Duplicate OPPs */
return 0;
}
@@ -1798,7 +1821,7 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
- opp_cmp = _opp_compare_key(new_opp, opp);
+ opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
if (opp_cmp > 0) {
*head = &opp->node;
continue;
@@ -1809,8 +1832,8 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->supplies[0].u_volt,
- opp->available, new_opp->rate,
+ __func__, opp->rates[0], opp->supplies[0].u_volt,
+ opp->available, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
@@ -1831,7 +1854,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
opp->available = false;
pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
- __func__, opp->required_opps[i]->np, opp->rate);
+ __func__, opp->required_opps[i]->np, opp->rates[0]);
return;
}
}
@@ -1847,7 +1870,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
* should be considered an error by the callers of _opp_add().
*/
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table, bool rate_not_available)
+ struct opp_table *opp_table)
{
struct list_head *head;
int ret;
@@ -1872,7 +1895,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
- __func__, new_opp->rate);
+ __func__, new_opp->rates[0]);
}
/* required-opps not fully initialized yet */
@@ -1913,12 +1936,15 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol;
int ret;
+ if (!assert_single_clk(opp_table))
+ return -EINVAL;
+
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return -ENOMEM;
/* populate the opp table */
- new_opp->rate = freq;
+ new_opp->rates[0] = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->supplies[0].u_volt = u_volt;
new_opp->supplies[0].u_volt_min = u_volt - tol;
@@ -1926,7 +1952,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, opp_table, false);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -1948,7 +1974,7 @@ free_opp:
}
/**
- * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * _opp_set_supported_hw() - Set supported platforms
* @opp_table: OPP table for which supported-hw has to be set.
* @versions: Array of hierarchy of versions to match.
* @count: Number of elements in the array.
@@ -1958,87 +1984,42 @@ free_opp:
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
*/
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions, unsigned int count)
+static int _opp_set_supported_hw(struct opp_table *opp_table,
+ const u32 *versions, unsigned int count)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
if (opp_table->supported_hw)
- return opp_table;
+ return 0;
opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
GFP_KERNEL);
- if (!opp_table->supported_hw) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
- }
+ if (!opp_table->supported_hw)
+ return -ENOMEM;
opp_table->supported_hw_count = count;
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
+ * _opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @opp_table: OPP table passed to _opp_set_supported_hw().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * _opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->supported_hw);
- opp_table->supported_hw = NULL;
- opp_table->supported_hw_count = 0;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
-
-static void devm_pm_opp_supported_hw_release(void *data)
+static void _opp_put_supported_hw(struct opp_table *opp_table)
{
- dev_pm_opp_put_supported_hw(data);
-}
-
-/**
- * devm_pm_opp_set_supported_hw() - Set supported platforms
- * @dev: Device for which supported-hw has to be set.
- * @versions: Array of hierarchy of versions to match.
- * @count: Number of elements in the array.
- *
- * This is a resource-managed variant of dev_pm_opp_set_supported_hw().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
- unsigned int count)
-{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_set_supported_hw(dev, versions, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_supported_hw_release,
- opp_table);
+ if (opp_table->supported_hw) {
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
+ }
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * _opp_set_prop_name() - Set prop-extn name
* @opp_table: OPP table for which the prop-name has to be set.
* @name: name to postfix to properties.
*
@@ -2047,53 +2028,36 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
*/
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
- if (opp_table->prop_name)
- return opp_table;
-
- opp_table->prop_name = kstrdup(name, GFP_KERNEL);
if (!opp_table->prop_name) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name)
+ return -ENOMEM;
}
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
- * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
+ * _opp_put_prop_name() - Releases resources blocked for prop-name
+ * @opp_table: OPP table passed to _opp_set_prop_name().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * _opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
+static void _opp_put_prop_name(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->prop_name);
- opp_table->prop_name = NULL;
-
- dev_pm_opp_put_opp_table(opp_table);
+ if (opp_table->prop_name) {
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
+ }
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/**
- * dev_pm_opp_set_regulators() - Set regulator names for the device
+ * _opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
* @names: NULL terminated array of pointers to the names of the regulators.
@@ -2104,36 +2068,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
+static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
+ const char * const names[])
{
- struct dev_pm_opp_supply *supplies;
- struct opp_table *opp_table;
+ const char * const *temp = names;
struct regulator *reg;
- int ret, i;
+ int count = 0, ret, i;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of regulators */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ if (!count)
+ return -EINVAL;
/* Another CPU that shares the OPP table has set the regulators ? */
if (opp_table->regulators)
- return opp_table;
+ return 0;
opp_table->regulators = kmalloc_array(count,
sizeof(*opp_table->regulators),
GFP_KERNEL);
- if (!opp_table->regulators) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!opp_table->regulators)
+ return -ENOMEM;
for (i = 0; i < count; i++) {
reg = regulator_get_optional(dev, names[i]);
@@ -2149,21 +2106,11 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
- if (!supplies) {
- ret = -ENOMEM;
- goto free_regulators;
- }
-
- mutex_lock(&opp_table->lock);
- opp_table->sod_supplies = supplies;
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = supplies;
- opp_table->set_opp_data->new_opp.supplies = supplies + count;
- }
- mutex_unlock(&opp_table->lock);
+ /* Set generic config_regulators() for the single-regulator case */
+ if (count == 1)
+ opp_table->config_regulators = _opp_config_regulator_single;
- return opp_table;
+ return 0;
free_regulators:
while (i != 0)
@@ -2172,26 +2119,20 @@ free_regulators:
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-err:
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(ret);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
/**
- * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
- * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ * _opp_put_regulators() - Releases resources blocked for regulator
+ * @opp_table: OPP table passed to _opp_set_regulators().
*/
-void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+static void _opp_put_regulators(struct opp_table *opp_table)
{
int i;
- if (unlikely(!opp_table))
- return;
-
if (!opp_table->regulators)
- goto put_opp_table;
+ return;
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
@@ -2201,252 +2142,158 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- mutex_lock(&opp_table->lock);
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = NULL;
- opp_table->set_opp_data->new_opp.supplies = NULL;
- }
-
- kfree(opp_table->sod_supplies);
- opp_table->sod_supplies = NULL;
- mutex_unlock(&opp_table->lock);
-
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
-static void devm_pm_opp_regulators_release(void *data)
+static void _put_clks(struct opp_table *opp_table, int count)
{
- dev_pm_opp_put_regulators(data);
-}
-
-/**
- * devm_pm_opp_set_regulators() - Set regulator names for the device
- * @dev: Device for which regulator name is being set.
- * @names: Array of pointers to the names of the regulator.
- * @count: Number of regulators.
- *
- * This is a resource-managed variant of dev_pm_opp_set_regulators().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
-{
- struct opp_table *opp_table;
+ int i;
- opp_table = dev_pm_opp_set_regulators(dev, names, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ for (i = count - 1; i >= 0; i--)
+ clk_put(opp_table->clks[i]);
- return devm_add_action_or_reset(dev, devm_pm_opp_regulators_release,
- opp_table);
+ kfree(opp_table->clks);
+ opp_table->clks = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_regulators);
/**
- * dev_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * In order to support OPP switching, OPP layer needs to get pointer to the
- * clock for the device. Simple cases work fine without using this routine (i.e.
- * by passing connection-id as NULL), but for a device with multiple clocks
- * available, the OPP core needs to know the exact name of the clk to use.
+ * _opp_set_clknames() - Set clk names for the device
+ * @dev: Device for which clk names are being set.
+ * @names: NULL terminated array of clk names.
+ * @config_clks: Optional custom helper to configure the clks; mandatory when
+ * more than one clk is used.
+ *
+ * In order to support OPP switching, OPP layer needs to get pointers to the
+ * clocks for the device. Simple cases work fine without using this routine
+ * (i.e. by passing connection-id as NULL), but for a device with multiple
+ * clocks available, the OPP core needs to know the exact names of the clks to
+ * use.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
+ const char * const names[],
+ config_clks_t config_clks)
{
- struct opp_table *opp_table;
- int ret;
+ const char * const *temp = names;
+ int count = 0, ret, i;
+ struct clk *clk;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of clks */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ /*
+ * This is a special case where we have a single clock, whose connection
+ * id name is NULL, i.e. first two entries are NULL in the array.
+ */
+ if (!count && !names[1])
+ count = 1;
- /* clk shouldn't be initialized at this point */
- if (WARN_ON(opp_table->clk)) {
- ret = -EBUSY;
- goto err;
- }
+ /* Fail early for invalid configurations */
+ if (!count || (!config_clks && count > 1))
+ return -EINVAL;
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, name);
- if (IS_ERR(opp_table->clk)) {
- ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
- "%s: Couldn't find clock\n", __func__);
- goto err;
- }
+ /* Another CPU that shares the OPP table has set the clkname ? */
+ if (opp_table->clks)
+ return 0;
- return opp_table;
+ opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
+ GFP_KERNEL);
+ if (!opp_table->clks)
+ return -ENOMEM;
-err:
- dev_pm_opp_put_opp_table(opp_table);
+ /* Find clks for the device */
+ for (i = 0; i < count; i++) {
+ clk = clk_get(dev, names[i]);
+ if (IS_ERR(clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(clk),
+ "%s: Couldn't find clock with name: %s\n",
+ __func__, names[i]);
+ goto free_clks;
+ }
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
+ opp_table->clks[i] = clk;
+ }
-/**
- * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
- * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
- */
-void dev_pm_opp_put_clkname(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
+ opp_table->clk_count = count;
+ opp_table->config_clks = config_clks;
- clk_put(opp_table->clk);
- opp_table->clk = ERR_PTR(-EINVAL);
+ /* Set the generic config_clks() callback for the single clk case */
+ if (count == 1) {
+ if (!opp_table->config_clks)
+ opp_table->config_clks = _opp_config_clk_single;
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
+ /*
+ * We could have just dropped the "clk" field and used "clks"
+ * everywhere. Instead we kept the "clk" field around for
+ * following reasons:
+ *
+ * - avoiding clks[0] everywhere else.
+ * - not running single clk helpers for multiple clk usecase by
+ * mistake.
+ *
+ * Since this is single-clk case, just update the clk pointer
+ * too.
+ */
+ opp_table->clk = opp_table->clks[0];
+ }
-static void devm_pm_opp_clkname_release(void *data)
-{
- dev_pm_opp_put_clkname(data);
+ return 0;
+
+free_clks:
+ _put_clks(opp_table, i);
+ return ret;
}
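Two caller-side shapes are accepted here (hypothetical arrays):

	/* Single clk with a NULL connection id: first two entries are NULL */
	static const char * const anon_clk[] = { NULL, NULL };

	/* Named clks, NULL terminated; more than one requires a config_clks callback */
	static const char * const named_clks[] = { "core", "mem", NULL };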
/**
- * devm_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * This is a resource-managed variant of dev_pm_opp_set_clkname().
- *
- * Return: 0 on success and errorno otherwise.
+ * _opp_put_clknames() - Releases resources blocked for clks.
+ * @opp_table: OPP table passed to _opp_set_clknames().
*/
-int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+static void _opp_put_clknames(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
+ if (!opp_table->clks)
+ return;
- opp_table = dev_pm_opp_set_clkname(dev, name);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ opp_table->config_clks = NULL;
+ opp_table->clk = ERR_PTR(-ENODEV);
- return devm_add_action_or_reset(dev, devm_pm_opp_clkname_release,
- opp_table);
+ _put_clks(opp_table, opp_table->clk_count);
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_clkname);
/**
- * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * _opp_set_config_regulators_helper() - Register custom set regulator helper.
* @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
+ * @config_regulators: Custom set regulator helper.
*
- * This is useful to support complex platforms (like platforms with multiple
- * regulators per device), instead of the generic OPP set rate helper.
+ * This is useful to support platforms with multiple regulators per device.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
+ struct device *dev, config_regulators_t config_regulators)
{
- struct dev_pm_set_opp_data *data;
- struct opp_table *opp_table;
-
- if (!set_opp)
- return ERR_PTR(-EINVAL);
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-EBUSY);
- }
-
/* Another CPU that shares the OPP table has set the helper ? */
- if (opp_table->set_opp)
- return opp_table;
+ if (!opp_table->config_regulators)
+ opp_table->config_regulators = config_regulators;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&opp_table->lock);
- opp_table->set_opp_data = data;
- if (opp_table->sod_supplies) {
- data->old_opp.supplies = opp_table->sod_supplies;
- data->new_opp.supplies = opp_table->sod_supplies +
- opp_table->regulator_count;
- }
- mutex_unlock(&opp_table->lock);
-
- opp_table->set_opp = set_opp;
-
- return opp_table;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
-
-/**
- * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
- * set_opp helper
- * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
- *
- * Release resources blocked for platform specific set_opp helper.
- */
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- opp_table->set_opp = NULL;
-
- mutex_lock(&opp_table->lock);
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
- mutex_unlock(&opp_table->lock);
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
-
-static void devm_pm_opp_unregister_set_opp_helper(void *data)
-{
- dev_pm_opp_unregister_set_opp_helper(data);
+ return 0;
}
/**
- * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
- * @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
- *
- * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ * _opp_put_config_regulators_helper() - Releases resources blocked for
+ * config_regulators helper.
+ * @opp_table: OPP table passed to _opp_set_config_regulators_helper().
*
- * Return: 0 on success and errorno otherwise.
+ * Release resources blocked for platform specific config_regulators helper.
*/
-int devm_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
- opp_table);
+ if (opp_table->config_regulators)
+ opp_table->config_regulators = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
-static void _opp_detach_genpd(struct opp_table *opp_table)
+static void _detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2466,7 +2313,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
}
/**
- * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
* @dev: Consumer device for which the genpd is getting attached.
* @names: Null terminated array of pointers containing names of genpd to attach.
* @virt_devs: Pointer to return the array of virtual devices.
@@ -2487,30 +2334,23 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
* The order of entries in the names array must match the order in which
* "required-opps" are added in DT.
*/
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
- const char * const *names, struct device ***virt_devs)
+static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ const char * const *names, struct device ***virt_devs)
{
- struct opp_table *opp_table;
struct device *virt_dev;
int index = 0, ret = -EINVAL;
const char * const *name = names;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
if (opp_table->genpd_virt_devs)
- return opp_table;
+ return 0;
/*
* If the genpd's OPP table isn't already initialized, parsing of the
* required-opps fail for dev. We should retry this after genpd's OPP
* table is added.
*/
- if (!opp_table->required_opp_count) {
- ret = -EPROBE_DEFER;
- goto put_table;
- }
+ if (!opp_table->required_opp_count)
+ return -EPROBE_DEFER;
mutex_lock(&opp_table->genpd_virt_dev_lock);
@@ -2528,8 +2368,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
}
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
- if (IS_ERR(virt_dev)) {
- ret = PTR_ERR(virt_dev);
+ if (IS_ERR_OR_NULL(virt_dev)) {
+ ret = PTR_ERR(virt_dev) ? : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
@@ -2543,73 +2383,230 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
*virt_devs = opp_table->genpd_virt_devs;
mutex_unlock(&opp_table->genpd_virt_dev_lock);
- return opp_table;
+ return 0;
err:
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
unlock:
mutex_unlock(&opp_table->genpd_virt_dev_lock);
+ return ret;
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
/**
- * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
- * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
+ * _opp_detach_genpd() - Detach genpd(s) from the device.
+ * @opp_table: OPP table passed to _opp_attach_genpd().
*
* This detaches the genpd(s) and resets the virtual device pointers.
*/
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
+static void _opp_detach_genpd(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
*/
mutex_lock(&opp_table->genpd_virt_dev_lock);
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
mutex_unlock(&opp_table->genpd_virt_dev_lock);
-
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
-static void devm_pm_opp_detach_genpd(void *data)
+static void _opp_clear_config(struct opp_config_data *data)
{
- dev_pm_opp_detach_genpd(data);
+ if (data->flags & OPP_CONFIG_GENPD)
+ _opp_detach_genpd(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR)
+ _opp_put_regulators(data->opp_table);
+ if (data->flags & OPP_CONFIG_SUPPORTED_HW)
+ _opp_put_supported_hw(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
+ _opp_put_config_regulators_helper(data->opp_table);
+ if (data->flags & OPP_CONFIG_PROP_NAME)
+ _opp_put_prop_name(data->opp_table);
+ if (data->flags & OPP_CONFIG_CLK)
+ _opp_put_clknames(data->opp_table);
+
+ dev_pm_opp_put_opp_table(data->opp_table);
+ kfree(data);
}
/**
- * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
- * device pointer
- * @dev: Consumer device for which the genpd is getting attached.
- * @names: Null terminated array of pointers containing names of genpd to attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * dev_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
*
- * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ * This allows all device OPP configurations to be performed at once.
*
- * Return: 0 on success and errorno otherwise.
+ * This must be called before any OPPs are initialized for the device. This may
+ * be called multiple times for the same OPP table, for example once for each
+ * CPU that shares the same table. This must be balanced by the same number of
+ * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
+ *
+ * This returns a token to the caller, which must be passed to
+ * dev_pm_opp_clear_config() to free the resources later. The value of the
+ * returned token will be >= 1 for success and negative for errors. The minimum
+ * value of 1 is chosen here to make it easy for callers to manage the resource.
*/
-int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names,
- struct device ***virt_devs)
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
struct opp_table *opp_table;
+ struct opp_config_data *data;
+ unsigned int id;
+ int ret;
- opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
- if (IS_ERR(opp_table))
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ opp_table = _add_opp_table(dev, false);
+ if (IS_ERR(opp_table)) {
+ kfree(data);
return PTR_ERR(opp_table);
+ }
+
+ data->opp_table = opp_table;
+ data->flags = 0;
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Configure clocks */
+ if (config->clk_names) {
+ ret = _opp_set_clknames(opp_table, dev, config->clk_names,
+ config->config_clks);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_CLK;
+ } else if (config->config_clks) {
+ /* Don't allow config callback without clocks */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Configure property names */
+ if (config->prop_name) {
+ ret = _opp_set_prop_name(opp_table, config->prop_name);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_PROP_NAME;
+ }
+
+ /* Configure config_regulators helper */
+ if (config->config_regulators) {
+ ret = _opp_set_config_regulators_helper(opp_table, dev,
+ config->config_regulators);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR_HELPER;
+ }
- return devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
- opp_table);
+ /* Configure supported hardware */
+ if (config->supported_hw) {
+ ret = _opp_set_supported_hw(opp_table, config->supported_hw,
+ config->supported_hw_count);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_SUPPORTED_HW;
+ }
+
+ /* Configure supplies */
+ if (config->regulator_names) {
+ ret = _opp_set_regulators(opp_table, dev,
+ config->regulator_names);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR;
+ }
+
+ /* Attach genpds */
+ if (config->genpd_names) {
+ ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
+ config->virt_devs);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_GENPD;
+ }
+
+ ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ return id;
+
+err:
+ _opp_clear_config(data);
+ return ret;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
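Putting it together, a hypothetical probe/remove pairing (names made up; unset fields are simply skipped):

	static const char * const reg_names[] = { "vdd", NULL };
	static const char * const clk_names[] = { "core", NULL };
	struct dev_pm_opp_config config = {
		.regulator_names = reg_names,
		.clk_names = clk_names,
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	/* ... add and use OPPs ... */

	dev_pm_opp_clear_config(token);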
+
+/**
+ * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
+ * @token: The token returned by dev_pm_opp_set_config() previously.
+ *
+ * This allows all device OPP configurations to be cleared at once. This must be
+ * called once for each call made to dev_pm_opp_set_config(), in order to free
+ * the OPPs properly.
+ *
+ * Currently the first call itself ends up freeing all the OPP configurations,
+ * while the later ones only drop the OPP table reference. This works well for
+ * now as we would never want to use a half-initialized OPP table and want to
+ * remove the configurations together.
+ */
+void dev_pm_opp_clear_config(int token)
+{
+ struct opp_config_data *data;
+
+ /*
+ * This lets the callers call this unconditionally and keep their code
+ * simple.
+ */
+ if (unlikely(token <= 0))
+ return;
+
+ data = xa_erase(&opp_configs, token);
+ if (WARN_ON(!data))
+ return;
+
+ _opp_clear_config(data);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
+
+static void devm_pm_opp_config_release(void *token)
+{
+ dev_pm_opp_clear_config((unsigned long)token);
+}
+
+/**
+ * devm_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
+ *
+ * This allows all device OPP configurations to be performed at once.
+ * This is a resource-managed variant of dev_pm_opp_set_config().
+ *
+ * Return: 0 on success and errno otherwise.
+ */
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
+{
+ int token = dev_pm_opp_set_config(dev, config);
+
+ if (token < 0)
+ return token;
+
+ return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
+ (void *) ((unsigned long) token));
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
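With the devm variant the token bookkeeping disappears; a sketch:

	ret = devm_pm_opp_set_config(dev, &config);
	if (ret)
		return ret;	/* cleanup runs automatically on driver detach */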
/**
* dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
@@ -2795,11 +2792,16 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2866,11 +2868,16 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2897,11 +2904,11 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
opp);
dev_pm_opp_put(opp);
- goto adjust_put_table;
+ goto put_table;
adjust_unlock:
mutex_unlock(&opp_table->lock);
-adjust_put_table:
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 5004335cf0de..3c3506021501 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -41,7 +41,7 @@
* the table if any of the mentioned functions have been invoked in the interim.
*/
int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = NULL;
@@ -76,7 +76,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
- *table = &freq_table[0];
+ *opp_table = &freq_table[0];
out:
if (ret)
@@ -94,13 +94,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
- if (!table)
+ if (!opp_table)
return;
- kfree(*table);
- *table = NULL;
+ kfree(*opp_table);
+ *opp_table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 1b6e5c55c3ed..96a30a032c5f 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -74,6 +74,24 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp,
}
}
+static void opp_debug_create_clks(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ char name[12];
+ int i;
+
+ if (opp_table->clk_count == 1) {
+ debugfs_create_ulong("rate_hz", S_IRUGO, pdentry, &opp->rates[0]);
+ return;
+ }
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ snprintf(name, sizeof(name), "rate_hz_%d", i);
+ debugfs_create_ulong(name, S_IRUGO, pdentry, &opp->rates[i]);
+ }
+}
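The resulting debugfs nodes for a two-clock OPP would look roughly like this (paths illustrative, not verified):

	.../opp:<name>/rate_hz_0
	.../opp:<name>/rate_hz_1

while single-clock tables keep the existing rate_hz node.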
+
static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
@@ -117,10 +135,11 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
* Get directory name for OPP.
*
* - Normally rate is unique to each OPP, use it to get unique opp-name.
- * - For some devices rate isn't available, use index instead.
+ * - For some devices the rate isn't available, or there are multiple
+ *   rates; use the index instead for them.
*/
- if (likely(opp->rate))
- id = opp->rate;
+ if (likely(opp_table->clk_count == 1 && opp->rates[0]))
+ id = opp->rates[0];
else
id = _get_opp_count(opp_table);
@@ -134,7 +153,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
- debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
debugfs_create_u32("level", S_IRUGO, d, &opp->level);
debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
&opp->clock_latency_ns);
@@ -142,6 +160,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
opp->of_name = of_node_full_name(opp->np);
debugfs_create_str("of_name", S_IRUGO, d, (char **)&opp->of_name);
+ opp_debug_create_clks(opp, opp_table, d);
opp_debug_create_supplies(opp, opp_table, d);
opp_debug_create_bw(opp, opp_table, d);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index eb89c9a75985..605d68673f92 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -242,20 +242,20 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
opp_table->np = opp_np;
_opp_table_alloc_required_tables(opp_table, dev, opp_np);
- of_node_put(opp_np);
}
void _of_clear_opp_table(struct opp_table *opp_table)
{
_opp_table_free_required_tables(opp_table);
+ of_node_put(opp_table->np);
}
/*
* Release all resources previously acquired with a call to
* _of_opp_alloc_required_opps().
*/
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+static void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
{
struct dev_pm_opp **required_opps = opp->required_opps;
int i;
@@ -275,6 +275,12 @@ void _of_opp_free_required_opps(struct opp_table *opp_table,
kfree(required_opps);
}
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
+{
+ _of_opp_free_required_opps(opp_table, opp);
+ of_node_put(opp->np);
+}
+
/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
struct dev_pm_opp *opp)
@@ -767,7 +773,51 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ struct property *prop;
+ int i, count, ret;
+ u64 *rates;
+
+ prop = of_find_property(np, "opp-hz", NULL);
+ if (!prop)
+ return -ENODEV;
+
+ count = prop->length / sizeof(u64);
+ if (opp_table->clk_count != count) {
+ pr_err("%s: Count mismatch between opp-hz and clk_count (%d %d)\n",
+ __func__, count, opp_table->clk_count);
+ return -EINVAL;
+ }
+
+ rates = kmalloc_array(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ ret = of_property_read_u64_array(np, "opp-hz", rates, count);
+ if (ret) {
+ pr_err("%s: Error parsing opp-hz: %d\n", __func__, ret);
+ } else {
+ /*
+		 * Rate is defined as an unsigned long in the clk API, so
+		 * cast explicitly to its type. Must be fixed once rate is
+		 * guaranteed to be 64 bit in the clk API.
+ */
+ for (i = 0; i < count; i++) {
+ new_opp->rates[i] = (unsigned long)rates[i];
+
+ /* This will happen for frequencies > 4.29 GHz */
+ WARN_ON(new_opp->rates[i] != rates[i]);
+ }
+ }
+
+ kfree(rates);
+
+ return ret;
+}
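With multiple clocks the property carries one u64 per clock (e.g. opp-hz = /bits/ 64 <600000000 300000000>;), and _read_rate() rejects any count that disagrees with clk_count. A consumer-side sketch using only the existing single-rate getters, which after this change resolve to rates[0]:

static unsigned long example_primary_rate(struct device *dev)
{
	unsigned long freq = 600000000;
	struct dev_pm_opp *opp;

	/* Find the OPP at or below 600 MHz and read back its primary rate */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return 0;

	freq = dev_pm_opp_get_freq(opp);	/* returns rates[0] */
	dev_pm_opp_put(opp);
	return freq;
}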
+
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
struct device_node *np, bool peak)
{
const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
@@ -780,9 +830,9 @@ static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
return -ENODEV;
count = prop->length / sizeof(u32);
- if (table->path_count != count) {
+ if (opp_table->path_count != count) {
pr_err("%s: Mismatch between %s and paths (%d %d)\n",
- __func__, name, count, table->path_count);
+ __func__, name, count, opp_table->path_count);
return -EINVAL;
}
@@ -808,34 +858,27 @@ out:
return ret;
}
-static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
- struct device_node *np, bool *rate_not_available)
+static int _read_opp_key(struct dev_pm_opp *new_opp,
+ struct opp_table *opp_table, struct device_node *np)
{
bool found = false;
- u64 rate;
int ret;
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (!ret) {
- /*
- * Rate is defined as an unsigned long in clk API, and so
- * casting explicitly to its type. Must be fixed once rate is 64
- * bit guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
+ ret = _read_rate(new_opp, opp_table, np);
+ if (!ret)
found = true;
- }
- *rate_not_available = !!ret;
+ else if (ret != -ENODEV)
+ return ret;
/*
* Bandwidth consists of peak and average (optional) values:
* opp-peak-kBps = <path1_value path2_value>;
* opp-avg-kBps = <path1_value path2_value>;
*/
- ret = _read_bw(new_opp, table, np, true);
+ ret = _read_bw(new_opp, opp_table, np, true);
if (!ret) {
found = true;
- ret = _read_bw(new_opp, table, np, false);
+ ret = _read_bw(new_opp, opp_table, np, false);
}
/* The properties were found but we failed to parse them */
@@ -881,13 +924,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
struct dev_pm_opp *new_opp;
u32 val;
int ret;
- bool rate_not_available = false;
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return ERR_PTR(-ENOMEM);
- ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+ ret = _read_opp_key(new_opp, opp_table, np);
if (ret < 0) {
dev_err(dev, "%s: opp key field not found\n", __func__);
goto free_opp;
@@ -895,14 +937,14 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %lu\n",
- new_opp->rate);
+ dev_dbg(dev, "OPP not supported by hardware: %s\n",
+ of_node_full_name(np));
goto free_opp;
}
new_opp->turbo = of_property_read_bool(np, "turbo-mode");
- new_opp->np = np;
+ new_opp->np = of_node_get(np);
new_opp->dynamic = false;
new_opp->available = true;
@@ -920,7 +962,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (opp_table->is_genpd)
new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
- ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -931,8 +973,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
if (opp_table->suspend_opp) {
- /* Pick the OPP with higher rate as suspend OPP */
- if (new_opp->rate > opp_table->suspend_opp->rate) {
+ /* Pick the OPP with higher rate/bw/level as suspend OPP */
+ if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) {
opp_table->suspend_opp->suspend = false;
new_opp->suspend = true;
opp_table->suspend_opp = new_opp;
@@ -947,7 +989,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
- __func__, new_opp->turbo, new_opp->rate,
+ __func__, new_opp->turbo, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
new_opp->level);
@@ -1084,7 +1126,7 @@ remove_static_opp:
return ret;
}
-static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _of_add_table_indexed(struct device *dev, int index)
{
struct opp_table *opp_table;
int ret, count;
@@ -1100,7 +1142,7 @@ static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
index = 0;
}
- opp_table = _add_opp_table_indexed(dev, index, getclk);
+ opp_table = _add_opp_table_indexed(dev, index, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -1124,11 +1166,11 @@ static void devm_pm_opp_of_table_release(void *data)
dev_pm_opp_of_remove_table(data);
}
-static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _devm_of_add_table_indexed(struct device *dev, int index)
{
int ret;
- ret = _of_add_table_indexed(dev, index, getclk);
+ ret = _of_add_table_indexed(dev, index);
if (ret)
return ret;
@@ -1156,7 +1198,7 @@ static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk
*/
int devm_pm_opp_of_add_table(struct device *dev)
{
- return _devm_of_add_table_indexed(dev, 0, true);
+ return _devm_of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
@@ -1179,7 +1221,7 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table(struct device *dev)
{
- return _of_add_table_indexed(dev, 0, true);
+ return _of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
@@ -1195,7 +1237,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _of_add_table_indexed(dev, index, true);
+ return _of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
@@ -1208,42 +1250,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
*/
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _devm_of_add_table_indexed(dev, index, true);
+ return _devm_of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
-/**
- * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * Register the initial OPP table with the OPP library for given device only
- * using the "operating-points-v2" property. Do not try to get the clk for the
- * device.
- *
- * Return: Refer to dev_pm_opp_of_add_table() for return values.
- */
-int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
-
-/**
- * devm_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * This is a resource-managed variant of dev_pm_opp_of_add_table_noclk().
- */
-int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _devm_of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_noclk);
-
/* CPU device specific helpers */
/**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 45e3a55239a1..3a6e077df386 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -28,6 +28,27 @@ extern struct mutex opp_table_lock;
extern struct list_head opp_tables, lazy_opp_tables;
+/* OPP Config flags */
+#define OPP_CONFIG_CLK BIT(0)
+#define OPP_CONFIG_REGULATOR BIT(1)
+#define OPP_CONFIG_REGULATOR_HELPER BIT(2)
+#define OPP_CONFIG_PROP_NAME BIT(3)
+#define OPP_CONFIG_SUPPORTED_HW BIT(4)
+#define OPP_CONFIG_GENPD BIT(5)
+
+/**
+ * struct opp_config_data - data for set config operations
+ * @opp_table: OPP table
+ * @flags: OPP config flags
+ *
+ * This structure stores the OPP config information for each OPP table
+ * configuration by the callers.
+ */
+struct opp_config_data {
+ struct opp_table *opp_table;
+ unsigned int flags;
+};
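The flags record which resources a configuration call acquired, so that clearing can release exactly those. A hedged caller sketch, assuming the dev_pm_opp_set_config()/dev_pm_opp_clear_config() pair this series builds on top of these flags (clock and property names hypothetical):

static int example_set_config(struct device *dev)
{
	static const char * const clk_names[] = { "core", NULL };
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,	/* acquires OPP_CONFIG_CLK */
		.prop_name = "fast",	/* acquires OPP_CONFIG_PROP_NAME */
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	/* ... use the OPP table ... */

	dev_pm_opp_clear_config(token);	/* releases only what was set */
	return 0;
}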
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -58,7 +79,7 @@ extern struct list_head opp_tables, lazy_opp_tables;
* @suspend: true if suspend OPP
* @removed: flag indicating that OPP's reference is dropped by OPP core.
* @pstate: Device's power domain's performance state.
- * @rate: Frequency in hertz
+ * @rates: Frequencies in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
* @bandwidth: Interconnect bandwidth values
@@ -81,7 +102,7 @@ struct dev_pm_opp {
bool suspend;
bool removed;
unsigned int pstate;
- unsigned long rate;
+ unsigned long *rates;
unsigned int level;
struct dev_pm_opp_supply *supplies;
@@ -138,7 +159,7 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
- * @current_rate: Currently configured frequency.
+ * @rate_clk_single: Currently configured frequency for single clk.
* @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -149,7 +170,11 @@ enum opp_table_access {
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
- * @clk: Device's clock handle
+ * @config_clks: Platform specific config_clks() callback.
+ * @clks: Device's clock handles, for multiple clocks.
+ * @clk: Device's clock handle, for single clock.
+ * @clk_count: Number of clocks.
+ * @config_regulators: Platform specific config_regulators() callback.
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
@@ -159,9 +184,6 @@ enum opp_table_access {
* @enabled: Set to true if the device's resources are enabled/configured.
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
- * @set_opp: Platform specific set_opp callback
- * @sod_supplies: Set opp data supplies
- * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -188,7 +210,7 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
- unsigned long current_rate;
+ unsigned long rate_clk_single;
struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
@@ -200,7 +222,11 @@ struct opp_table {
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
+ config_clks_t config_clks;
+ struct clk **clks;
struct clk *clk;
+ int clk_count;
+ config_regulators_t config_regulators;
struct regulator **regulators;
int regulator_count;
struct icc_path **paths;
@@ -209,10 +235,6 @@ struct opp_table {
bool genpd_performance_state;
bool is_genpd;
- int (*set_opp)(struct dev_pm_set_opp_data *data);
- struct dev_pm_opp_supply *sod_supplies;
- struct dev_pm_set_opp_data *set_opp_data;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
char dentry_name[NAME_MAX];
@@ -228,8 +250,8 @@ struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
@@ -245,14 +267,12 @@ static inline bool lazy_linking_pending(struct opp_table *opp_table)
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
void _of_clear_opp_table(struct opp_table *opp_table);
struct opp_table *_managed_opp(struct device *dev, int index);
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp);
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
-static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp) {}
+static inline void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) {}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index bd4771f388ab..8f3f13fbbb25 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -36,11 +36,15 @@ struct ti_opp_supply_optimum_voltage_table {
* @vdd_table: Optimized voltage mapping table
* @num_vdd_table: number of entries in vdd_table
* @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ * @old_supplies: Placeholder for supplies information for old OPP.
+ * @new_supplies: Placeholder for supplies information for new OPP.
*/
struct ti_opp_supply_data {
struct ti_opp_supply_optimum_voltage_table *vdd_table;
u32 num_vdd_table;
u32 vdd_absolute_max_voltage_uv;
+ struct dev_pm_opp_supply old_supplies[2];
+ struct dev_pm_opp_supply new_supplies[2];
};
static struct ti_opp_supply_data opp_data;
@@ -266,27 +270,32 @@ static int _opp_set_voltage(struct device *dev,
return 0;
}
-/**
- * ti_opp_supply_set_opp() - do the opp supply transition
- * @data: information on regulators and new and old opps provided by
- * opp core to use in transition
- *
- * Return: If successful, 0, else appropriate error value.
- */
-static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+/* Do the opp supply transition */
+static int ti_opp_config_regulators(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
{
- struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
- struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
- struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
- struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
- struct device *dev = data->dev;
- unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
- struct clk *clk = data->clk;
- struct regulator *vdd_reg = data->regulators[0];
- struct regulator *vbb_reg = data->regulators[1];
+ struct dev_pm_opp_supply *old_supply_vdd = &opp_data.old_supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &opp_data.old_supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &opp_data.new_supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &opp_data.new_supplies[1];
+ struct regulator *vdd_reg = regulators[0];
+ struct regulator *vbb_reg = regulators[1];
+ unsigned long old_freq, freq;
int vdd_uv;
int ret;
+ /* We must have two regulators here */
+ WARN_ON(count != 2);
+
+ /* Fetch supplies and freq information from OPP core */
+ ret = dev_pm_opp_get_supplies(new_opp, opp_data.new_supplies);
+ WARN_ON(ret);
+
+ old_freq = dev_pm_opp_get_freq(old_opp);
+ freq = dev_pm_opp_get_freq(new_opp);
+ WARN_ON(!old_freq || !freq);
+
vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
new_supply_vdd->u_volt);
@@ -303,39 +312,24 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
goto restore_voltage;
- }
-
- /* Change frequency */
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
- }
-
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
+ } else {
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
"vdd");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
}
return 0;
-restore_freq:
- ret = clk_set_rate(clk, old_freq);
- if (ret)
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
restore_voltage:
+ /* Fetch old supplies information only if required */
+ ret = dev_pm_opp_get_supplies(old_opp, opp_data.old_supplies);
+ WARN_ON(ret);
+
/* This shouldn't harm even if the voltages weren't updated earlier */
if (old_supply_vdd->u_volt) {
ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
@@ -405,9 +399,8 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
return ret;
}
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
- ti_opp_supply_set_opp));
- if (ret)
+ ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
+ if (ret < 0)
_free_optimized_voltages(dev, &opp_data);
return ret;
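The callback type registered above is declared in pm_opp.h and is not part of this hunk; its shape, inferred from ti_opp_config_regulators() above:

/* Shape of the callback passed to dev_pm_opp_set_config_regulators(),
 * inferred from ti_opp_config_regulators() in this file. */
typedef int (*config_regulators_t)(struct device *dev,
			struct dev_pm_opp *old_opp,
			struct dev_pm_opp *new_opp,
			struct regulator **regulators, unsigned int count);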
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9be007c9420f..a66386043aa6 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -268,7 +268,7 @@ static int ioc_count;
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
-* This was was copied from sba_iommu.c. Don't try to unify
+* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,11 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ iounmap(ioc->ioc_regs);
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 3a8c98615634..bdef7a8d6ab8 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -221,16 +221,7 @@ static size_t irt_num_entry;
static struct irt_entry *iosapic_alloc_irt(int num_entries)
{
- unsigned long a;
-
- /* The IRT needs to be 8-byte aligned for the PDC call.
- * Normally kmalloc would guarantee larger alignment, but
- * if CONFIG_DEBUG_SLAB is enabled, then we can get only
- * 4-byte alignment on 32-bit kernels
- */
- a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
- a = (a + 7UL) & ~7UL;
- return (struct irt_entry *)a;
+ return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);
}
/**
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 732b516c7bf8..afc6e66ddc31 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1476,9 +1476,13 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap(dev->hpa.start, 4096);
+ void __iomem *addr;
int max;
+ addr = ioremap(dev->hpa.start, 4096);
+ if (addr == NULL)
+ return -ENOMEM;
+
/* Read HW Rev First */
func_class = READ_REG32(addr + LBA_FCLASS);
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 1e4a5663d011..d4be9d2ee74d 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -646,7 +646,7 @@ int lcd_print( const char *str )
cancel_delayed_work_sync(&led_task);
/* copy display string to buffer for procfs */
- strlcpy(lcd_text, str, sizeof(lcd_text));
+ strscpy(lcd_text, str, sizeof(lcd_text));
/* Set LCD Cursor to 1st character */
gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 133c73207782..55c028af4bd9 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -121,6 +121,9 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_DOE
+ bool
+
config PCI_ECAM
bool
@@ -164,6 +167,11 @@ config PCI_PASID
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
depends on ZONE_DEVICE
+ #
+ # The need for the scatterlist DMA bus address flag means PCI P2PDMA
+ # requires 64bit
+ #
+ depends on 64BIT
select GENERIC_ALLOCATOR
help
	  Enables drivers to do PCI peer-to-peer transactions to and from
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0da6b1ebc694..2680e4c92f0a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
+obj-$(CONFIG_PCI_DOE) += doe.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index cf1627679716..83ddb190292e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -161,7 +161,11 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar];
+
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
@@ -218,6 +222,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
clear_bit(atu_index, ep->ib_window_map);
ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
@@ -245,6 +250,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if (ret)
return ret;
+ if (ep->epf_bar[bar])
+ return 0;
+
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
new file mode 100644
index 000000000000..e402f05068a5
--- /dev/null
+++ b/drivers/pci/doe.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#define dev_fmt(fmt) "DOE: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/workqueue.h>
+
+#define PCI_DOE_PROTOCOL_DISCOVERY 0
+
+/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
+#define PCI_DOE_TIMEOUT HZ
+#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
+
+#define PCI_DOE_FLAG_CANCEL 0
+#define PCI_DOE_FLAG_DEAD 1
+
+/**
+ * struct pci_doe_mb - State for a single DOE mailbox
+ *
+ * This state is used to manage a single DOE mailbox capability. All fields
+ * should be considered opaque to the consumers and the structure passed into
+ * the helpers below after being created by pcim_doe_create_mb()
+ *
+ * @pdev: PCI device this mailbox belongs to
+ * @cap_offset: Capability offset
+ * @prots: Array of protocols supported (encoded as long values)
+ * @wq: Wait queue for work item
+ * @work_queue: Queue of pci_doe_work items
+ * @flags: Bit array of PCI_DOE_FLAG_* flags
+ */
+struct pci_doe_mb {
+ struct pci_dev *pdev;
+ u16 cap_offset;
+ struct xarray prots;
+
+ wait_queue_head_t wq;
+ struct workqueue_struct *work_queue;
+ unsigned long flags;
+};
+
+static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
+{
+ if (wait_event_timeout(doe_mb->wq,
+ test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
+ timeout))
+ return -EIO;
+ return 0;
+}
+
+static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
+}
+
+static int pci_doe_abort(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+
+ pci_dbg(pdev, "[%x] Issuing Abort\n", offset);
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);
+
+ do {
+ int rc;
+ u32 val;
+
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc)
+ return rc;
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+
+ /* Abort success! */
+ if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
+ !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return 0;
+
+ } while (!time_after(jiffies, timeout_jiffies));
+
+ /* Abort has timed out and the MB is dead */
+ pci_err(pdev, "[%x] ABORT timed out\n", offset);
+ return -EIO;
+}
+
+static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+ int i;
+
+ /*
+ * Check the DOE busy bit is not set. If it is set, this could indicate
+ * someone other than Linux (e.g. firmware) is using the mailbox. Note
+ * it is expected that firmware and OS will negotiate access rights via
+ * an as-yet-to-be-defined method.
+ */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return -EBUSY;
+
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ /* Write DOE Header */
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
+ /* Length is 2 DW of header + length of payload in DW */
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+ 2 + task->request_pl_sz /
+ sizeof(u32)));
+ for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ task->request_pl[i]);
+
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
+
+ return 0;
+}
+
+static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
+ return true;
+ return false;
+}
+
+static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ size_t length, payload_length;
+ u32 val;
+ int i;
+
+ /* Read the first dword to get the protocol */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
+ return -EIO;
+ }
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ /* Read the second dword to get the length */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+
+ length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
+ if (length > SZ_1M || length < 2)
+ return -EIO;
+
+ /* First 2 dwords have already been read */
+ length -= 2;
+ payload_length = min(length, task->response_pl_sz / sizeof(u32));
+ /* Read the rest of the response payload */
+ for (i = 0; i < payload_length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ,
+ &task->response_pl[i]);
+ /* Prior to the last ack, ensure Data Object Ready */
+ if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
+ return -EIO;
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Flush excess length */
+ for (; i < length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Final error check to pick up on any since Data Object Ready */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+}
+
+static void signal_task_complete(struct pci_doe_task *task, int rv)
+{
+ task->rv = rv;
+ task->complete(task);
+}
+
+static void signal_task_abort(struct pci_doe_task *task, int rv)
+{
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+
+ if (pci_doe_abort(doe_mb)) {
+ /*
+		 * If the device can't process an abort, mark the mailbox
+		 * dead: no more submissions.
+ */
+		pci_err(pdev, "[%x] Abort failed, marking mailbox dead\n",
+ doe_mb->cap_offset);
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+ }
+ signal_task_complete(task, rv);
+}
+
+static void doe_statemachine_work(struct work_struct *work)
+{
+ struct pci_doe_task *task = container_of(work, struct pci_doe_task,
+ work);
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+ u32 val;
+ int rc;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
+ signal_task_complete(task, -EIO);
+ return;
+ }
+
+ /* Send request */
+ rc = pci_doe_send_req(doe_mb, task);
+ if (rc) {
+ /*
+ * The specification does not provide any guidance on how to
+ * resolve conflicting requests from other entities.
+ * Furthermore, it is likely that busy will not be detected
+ * most of the time. Flag any detection of status busy with an
+ * error.
+ */
+ if (rc == -EBUSY)
+ dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
+ offset);
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ /* Poll for response */
+retry_resp:
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+
+ if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc) {
+ signal_task_abort(task, rc);
+ return;
+ }
+ goto retry_resp;
+ }
+
+ rc = pci_doe_recv_resp(doe_mb, task);
+ if (rc < 0) {
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ signal_task_complete(task, rc);
+}
+
+static void pci_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ u8 *protocol)
+{
+ u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
+ *index);
+ u32 response_pl;
+ DECLARE_COMPLETION_ONSTACK(c);
+ struct pci_doe_task task = {
+ .prot.vid = PCI_VENDOR_ID_PCI_SIG,
+ .prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
+ .request_pl = &request_pl,
+ .request_pl_sz = sizeof(request_pl),
+ .response_pl = &response_pl,
+ .response_pl_sz = sizeof(response_pl),
+ .complete = pci_doe_task_complete,
+ .private = &c,
+ };
+ int rc;
+
+ rc = pci_doe_submit_task(doe_mb, &task);
+ if (rc < 0)
+ return rc;
+
+ wait_for_completion(&c);
+
+ if (task.rv != sizeof(response_pl))
+ return -EIO;
+
+ *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
+ *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ response_pl);
+ *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
+ response_pl);
+
+ return 0;
+}
+
+static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+{
+ return xa_mk_value((vid << 8) | prot);
+}
+
+static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+{
+ u8 index = 0;
+ u8 xa_idx = 0;
+
+ do {
+ int rc;
+ u16 vid;
+ u8 prot;
+
+ rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ if (rc)
+ return rc;
+
+ pci_dbg(doe_mb->pdev,
+ "[%x] Found protocol %d vid: %x prot: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, prot);
+
+ rc = xa_insert(&doe_mb->prots, xa_idx++,
+ pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ if (rc)
+ return rc;
+ } while (index);
+
+ return 0;
+}
+
+static void pci_doe_xa_destroy(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ xa_destroy(&doe_mb->prots);
+}
+
+static void pci_doe_destroy_workqueue(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ destroy_workqueue(doe_mb->work_queue);
+}
+
+static void pci_doe_flush_mb(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ /* Stop all pending work items from starting */
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+
+ /* Cancel an in progress work item, if necessary */
+ set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
+ wake_up(&doe_mb->wq);
+
+ /* Flush all work items */
+ flush_workqueue(doe_mb->work_queue);
+}
+
+/**
+ * pcim_doe_create_mb() - Create a DOE mailbox object
+ *
+ * @pdev: PCI device to create the DOE mailbox for
+ * @cap_offset: Offset of the DOE mailbox
+ *
+ * Create a single mailbox object to manage the mailbox protocol at the
+ * cap_offset specified.
+ *
+ * RETURNS: created mailbox object on success
+ * ERR_PTR(-errno) on failure
+ */
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
+{
+ struct pci_doe_mb *doe_mb;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
+ if (!doe_mb)
+ return ERR_PTR(-ENOMEM);
+
+ doe_mb->pdev = pdev;
+ doe_mb->cap_offset = cap_offset;
+ init_waitqueue_head(&doe_mb->wq);
+
+ xa_init(&doe_mb->prots);
+ rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
+ dev_driver_string(&pdev->dev),
+ pci_name(pdev),
+ doe_mb->cap_offset);
+ if (!doe_mb->work_queue) {
+ pci_err(pdev, "[%x] failed to allocate work queue\n",
+ doe_mb->cap_offset);
+ return ERR_PTR(-ENOMEM);
+ }
+ rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* Reset the mailbox by issuing an abort */
+ rc = pci_doe_abort(doe_mb);
+ if (rc) {
+		pci_err(pdev, "[%x] failed to reset mailbox with abort command: %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * The state machine and the mailbox should be in sync now;
+ * Set up mailbox flush prior to using the mailbox to query protocols.
+ */
+ rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = pci_doe_cache_protocols(doe_mb);
+ if (rc) {
+		pci_err(pdev, "[%x] failed to cache protocols: %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ return doe_mb;
+}
+EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
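A hedged sketch of a caller, modelled on how the CXL core in this series consumes the API; PCI_EXT_CAP_ID_DOE is assumed to be the extended-capability ID defined alongside this file:

static void example_create_doe_mbs(struct pci_dev *pdev)
{
	u16 off = 0;

	/* Walk every DOE instance on the device, one mailbox each */
	while ((off = pci_find_next_ext_capability(pdev, off,
						   PCI_EXT_CAP_ID_DOE))) {
		struct pci_doe_mb *mb = pcim_doe_create_mb(pdev, off);

		if (IS_ERR(mb))
			pci_err(pdev, "[%x] failed to create DOE mailbox\n",
				off);
	}
}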
+
+/**
+ * pci_doe_supports_prot() - Return if the DOE instance supports the given
+ * protocol
+ * @doe_mb: DOE mailbox capability to query
+ * @vid: Protocol Vendor ID
+ * @type: Protocol type
+ *
+ * RETURNS: True if the DOE mailbox supports the protocol specified
+ */
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+{
+ unsigned long index;
+ void *entry;
+
+ /* The discovery protocol must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ return true;
+
+ xa_for_each(&doe_mb->prots, index, entry)
+ if (entry == pci_doe_xa_prot_entry(vid, type))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
+
+/**
+ * pci_doe_submit_task() - Submit a task to be processed by the state machine
+ *
+ * @doe_mb: DOE mailbox capability to submit to
+ * @task: task to be queued
+ *
+ * Submit a DOE task (request/response) to the DOE mailbox to be processed.
+ * Returns upon queueing the task object. If the queue is full this function
+ * will sleep until there is room in the queue.
+ *
+ * task->complete will be called when the state machine is done processing this
+ * task.
+ *
+ * Excess data will be discarded.
+ *
+ * RETURNS: 0 when task has been successfully queued, -ERRNO on error
+ */
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ return -EINVAL;
+
+ /*
+ * DOE requests must be a whole number of DW and the response needs to
+ * be big enough for at least 1 DW
+ */
+ if (task->request_pl_sz % sizeof(u32) ||
+ task->response_pl_sz < sizeof(u32))
+ return -EINVAL;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
+ return -EIO;
+
+ task->doe_mb = doe_mb;
+ INIT_WORK(&task->work, doe_statemachine_work);
+ queue_work(doe_mb->work_queue, &task->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_doe_submit_task);
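pci_doe_submit_task() is asynchronous, so most callers will want a synchronous wrapper; a sketch following the same completion pattern pci_doe_discovery() uses above:

static int example_doe_sync(struct pci_doe_mb *doe_mb, u16 vid, u8 type,
			    u32 *req, size_t req_sz, u32 *rsp, size_t rsp_sz)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = vid,
		.prot.type = type,
		.request_pl = req,
		.request_pl_sz = req_sz,
		.response_pl = rsp,
		.response_pl_sz = rsp_sz,
		.complete = pci_doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc < 0)
		return rc;

	wait_for_completion(&c);
	return task.rv;	/* bytes of response payload, or negative errno */
}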
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..295a033ee9a2 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,15 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_VNTB
+	tristate "PCI Endpoint Virtual NTB driver"
+ depends on PCI_ENDPOINT
+ depends on NTB
+ select CONFIGFS_FS
+ help
+	  Select this configuration option to enable the Non-Transparent
+	  Bridge (NTB) driver for PCIe Endpoint. This driver implements NTB
+	  functionality between a PCI Root Port and a PCIe Endpoint.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..5c13001deaba 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
new file mode 100644
index 000000000000..0ea85e1d292e
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -0,0 +1,1442 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ * Between PCI RC and EP
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Copyright (C) 2022 NXP
+ *
+ * Based on pci-epf-ntb.c
+ * Author: Frank Li <Frank.Li@nxp.com>
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * +------------+ +---------------------------------------+
+ * | | | |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | NetDev | | | NetDev |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | Transfer | | | Transfer |
+ * +------------+ | +--------------+
+ * | | | | |
+ * | PCI NTB | | | |
+ * | EPF | | | |
+ * | Driver | | | PCI Virtual |
+ * | | +---------------+ | NTB Driver |
+ * | | | PCI EP NTB |<------>| |
+ * | | | FN Driver | | |
+ * +------------+ +---------------+ +--------------+
+ * | | | | | |
+ * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
+ * | | PCI | | | Bus |
+ * +------------+ +---------------+--------+--------------+
+ * PCIe Root Port PCI EP
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/ntb.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_DB,
+ BAR_MW0,
+ BAR_MW1,
+ BAR_MW2,
+};
+
+/*
+ * +--------------------------------------------------+ Base
+ * | |
+ * | |
+ * | |
+ * | Common Control Register |
+ * | |
+ * | |
+ * | |
+ * +-----------------------+--------------------------+ Base+span_offset
+ * | | |
+ * | Peer Span Space | Span Space |
+ * | | |
+ * | | |
+ * +-----------------------+--------------------------+ Base+span_offset
+ * | | | +span_count * 4
+ * | | |
+ * | Span Space | Peer Span Space |
+ * | | |
+ * +-----------------------+--------------------------+
+ * Virtual PCI PCIe Endpoint
+ * NTB Driver NTB Driver
+ */
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 reserved;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
+
+struct epf_ntb {
+ struct ntb_dev ntb;
+ struct pci_epf *epf;
+ struct config_group group;
+
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ u64 mws_size[MAX_MW];
+ u64 db;
+ u32 vbus_number;
+ u16 vntb_pid;
+ u16 vntb_vid;
+
+ bool linkup;
+ u32 spad_size;
+
+ enum pci_barno epf_ntb_bar[6];
+
+ struct epf_ntb_ctrl *reg;
+
+ phys_addr_t epf_db_phy;
+ void __iomem *epf_db;
+
+ phys_addr_t vpci_mw_phy[MAX_MW];
+ void __iomem *vpci_mw_addr[MAX_MW];
+
+ struct delayed_work cmd_handler;
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function on the HOST invokes ntb_link_enable(), this NTB
+ * function driver will trigger a link event to the vhost.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ if (link_up)
+ ntb->reg->link_status |= LINK_STATUS_UP;
+ else
+ ntb->reg->link_status &= ~LINK_STATUS_UP;
+
+ ntb_link_event(&ntb->ntb);
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost
+ * to access the memory window of host
+ * @ntb: NTB device that facilitates communication between host and vhost
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * EP Outbound Window
+ * +--------+ +-----------+
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | +-----------+
+ * | Virtual| | Memory Win|
+ * | NTB | -----------> | |
+ * | Driver | | |
+ * | | +-----------+
+ * | | | |
+ * | | | |
+ * +--------+ +-----------+
+ * VHost PCI EP
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+{
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ u64 addr, size;
+ int ret = 0;
+
+ phys_addr = ntb->vpci_mw_phy[mw];
+ addr = ntb->reg->addr;
+ size = ntb->reg->size;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&ntb->epf->epc->dev,
+ "Failed to map memory window %d address\n", mw);
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Tear down the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Tear down the OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
+{
+ pci_epc_unmap_addr(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ ntb->vpci_mw_phy[mw]);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct of the epf_ntb device
+ *
+ * Workqueue function that gets invoked periodically (once every 5ms) to
+ * check whether it has received any commands from the NTB host. The host
+ * can send commands to configure a doorbell, configure a memory window or
+ * update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ int ret;
+ int i;
+
+ ntb = container_of(work, struct epf_ntb, cmd_handler.work);
+
+ for (i = 1; i < ntb->db_count; i++) {
+		if (readl(ntb->epf_db + i * 4)) {
+			ntb->db |= 1 << (i - 1);
+			ntb_db_event(&ntb->ntb, i);
+			writel(0, ntb->epf_db + i * 4);
+		}
+ }
+
+ ctrl = ntb->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+	dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb->linkup = true;
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ case COMMAND_LINK_DOWN:
+ ntb->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "UNKNOWN command: %d\n", command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
+ msecs_to_jiffies(5));
+}
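The peer side of this command protocol lives in the virtual NTB driver further down this file; an illustrative sketch of posting one command against the handler above (helper name hypothetical):

/* Illustrative only: post a command and poll for the handler's answer */
static int example_post_command(struct epf_ntb_ctrl *ctrl, u32 cmd, u32 arg)
{
	ctrl->command_status = 0;
	ctrl->argument = arg;		/* argument must be visible first */
	ctrl->command = cmd;		/* the handler above polls every 5 ms */

	while (!ctrl->command_status)
		msleep(5);

	return ctrl->command_status == COMMAND_STATUS_OK ? 0 : -EIO;
}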
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and the config region are combined
+ * into a single region and mapped using the same BAR. Also note that HOST2's
+ * peer scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region.
+ *
+ * Please note the self scratchpad region and the config region are combined
+ * into a single region and mapped using the same BAR.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+		dev_err(dev, "Config/Status/SPAD BAR set failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+ size_t align;
+ enum pci_barno barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size;
+ struct pci_epf *epf = ntb->epf;
+ struct device *dev = &epf->dev;
+ u32 spad_count;
+ void *base;
+ int i;
+ const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
+ epf->func_no,
+ epf->vfunc_no);
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+	if (!IS_ALIGNED(size, align))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = 2 * spad_count * 4;
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ if (!base) {
+ dev_err(dev, "Config/Status/SPAD alloc region fail\n");
+ return -ENOMEM;
+ }
+
+ ntb->reg = base;
+
+ ctrl = ntb->reg;
+ ctrl->spad_offset = ctrl_size;
+
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ntb->spad_size = spad_size;
+
+ ctrl->db_entry_size = 4;
+
+ for (i = 0; i < ntb->db_count; i++) {
+ ntb->reg->db_data[i] = 1 + i;
+ ntb->reg->db_offset[i] = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Configure MSI/MSI-X capability for each interface with number of
+ * interrupts equal to "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ if (!(epc_features->msix_capable || epc_features->msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+
+ if (epc_features->msi_capable) {
+ ret = pci_epc_set_msi(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ 16);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_bar_init() - Configure Doorbell window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ u32 align;
+ struct device *dev = &ntb->epf->dev;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t size = 4 * ntb->db_count;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no);
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ if (!mw_addr) {
+ dev_err(dev, "Failed to allocate OB address\n");
+ return -ENOMEM;
+ }
+
+ ntb->epf_db = mw_addr;
+
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Doorbell BAR set failed\n");
+ goto err_alloc_peer_mem;
+ }
+ return ret;
+
+err_alloc_peer_mem:
+	pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
+	return ret;
+}
+
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
+
+/**
+ * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
+ * allocated in peer's outbound address space
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+ pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+}
+
+/**
+ * epf_ntb_mw_bar_init() - Configure Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
+{
+ int ret = 0;
+ int i;
+ u64 size;
+ enum pci_barno barno;
+ struct device *dev = &ntb->epf->dev;
+
+ for (i = 0; i < ntb->num_mws; i++) {
+ size = ntb->mws_size[i];
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+
+ ntb->epf->bar[barno].barno = barno;
+ ntb->epf->bar[barno].size = size;
+ ntb->epf->bar[barno].addr = NULL;
+ ntb->epf->bar[barno].phys_addr = 0;
+ ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+ if (ret) {
+ dev_err(dev, "MW set failed\n");
+ goto err_alloc_mem;
+ }
+
+ /* Allocate EPC outbound memory windows for the vPCI vNTB device */
+ ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
+ &ntb->vpci_mw_phy[i],
+ size);
+ if (!ntb->vpci_mw_addr[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to allocate source address\n");
+ goto err_set_bar;
+ }
+ }
+
+ return ret;
+
+err_set_bar:
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+err_alloc_mem:
+ epf_ntb_mw_bar_clear(ntb, i);
+ return ret;
+}
+
+/**
+ * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+{
+ enum pci_barno barno;
+ int i;
+
+ for (i = 0; i < num_mws; i++) {
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+
+ pci_epc_mem_free_addr(ntb->epf->epc,
+ ntb->vpci_mw_phy[i],
+ ntb->vpci_mw_addr[i],
+ ntb->mws_size[i]);
+ }
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Remove the endpoint function from the EPC and release the EPC reference.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
+ pci_epc_put(ntb->epf->epc);
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "Fail to get NTB function BAR\n");
+ return barno;
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for MW%d and beyond\n", i + 1);
+ break;
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
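+
+/*
+ * For example, on a controller with all six BARs free, the loops above
+ * settle on BAR_CONFIG -> BAR0, BAR_DB -> BAR1 and BAR_MW0 -> BAR2, with
+ * optional extra memory windows taking BAR3 onwards; when
+ * pci_epc_get_next_free_bar() runs out, num_mws is trimmed to the windows
+ * that did get a BAR.
+ */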
+
+/**
+ * epf_ntb_epc_init() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * that polls for commands from the host. This function writes to the
+ * EP controller hardware to configure it.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = epf->epc;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb);
+ if (ret) {
+ dev_err(dev, "Config/self SPAD BAR init failed");
+ return ret;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb);
+ if (ret) {
+ dev_err(dev, "Interrupt configuration failed\n");
+ goto err_config_interrupt;
+ }
+
+ ret = epf_ntb_db_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "DB BAR init failed\n");
+ goto err_db_bar_init;
+ }
+
+ ret = epf_ntb_mw_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "MW BAR init failed\n");
+ goto err_mw_bar_init;
+ }
+
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ goto err_write_header;
+ }
+ }
+
+ INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+err_mw_bar_init:
+ epf_ntb_db_bar_clear(ntb);
+err_db_bar_init:
+err_config_interrupt:
+ epf_ntb_config_sspad_bar_clear(ntb);
+
+ return ret;
+}
+
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to cleanup all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
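+
+/*
+ * For reference, EPF_NTB_MW_R(mw1) expands to an epf_ntb_mw1_show() whose
+ * body is the macro body above with #_name stringified, i.e. the check
+ * becomes:
+ *
+ *	if (sscanf("mw1", "mw%d", &win_no) != 1)
+ *		return -EINVAL;
+ *
+ * so the window number is recovered at runtime by parsing the attribute
+ * name the macro was instantiated with.
+ */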
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_R(vbus_number)
+EPF_NTB_W(vbus_number)
+EPF_NTB_R(vntb_pid)
+EPF_NTB_W(vntb_pid)
+EPF_NTB_R(vntb_vid)
+EPF_NTB_W(vntb_vid)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+CONFIGFS_ATTR(epf_ntb_, vbus_number);
+CONFIGFS_ATTR(epf_ntb_, vntb_pid);
+CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ &epf_ntb_attr_vbus_number,
+ &epf_ntb_attr_vntb_pid,
+ &epf_ntb_attr_vntb_vid,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count, num_mws etc.,
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
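+
+/*
+ * With the attributes above in place, a configuration sequence from
+ * userspace looks roughly like the following (paths are illustrative):
+ *
+ *	# cd /sys/kernel/config/pci_ep/functions/pci_epf_vntb/func1
+ *	# echo 4 > spad_count
+ *	# echo 4 > db_count
+ *	# echo 1 > num_mws
+ *	# echo 0x100000 > mw1
+ */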
+
+/*==== virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/
+
+static u32 pci_space[] = {
+ 0xffffffff, /* Device ID, Vendor ID */
+ 0, /* Status, Command */
+ 0xffffffff, /* Class code, subclass, prog if, revision ID */
+ 0x40, /* BIST, header type, latency timer, cache line size */
+ 0, /* BAR 0 */
+ 0, /* BAR 1 */
+ 0, /* BAR 2 */
+ 0, /* BAR 3 */
+ 0, /* BAR 4 */
+ 0, /* BAR 5 */
+ 0, /* CardBus CIS pointer */
+ 0, /* Subsystem ID, Subsystem Vendor ID */
+ 0, /* Expansion ROM base address */
+ 0, /* Reserved, capabilities pointer */
+ 0, /* Reserved */
+ 0, /* Max_Lat, Min_Gnt, interrupt pin, interrupt line */
+};
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ if (devfn == 0) {
+ memcpy(val, ((u8 *)pci_space) + where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return 0;
+}
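+
+/*
+ * Config space reads for devfn 0 are served straight out of pci_space[];
+ * for example, a 4-byte read at offset 0 returns the Device/Vendor ID dword
+ * that epf_ntb_bind() patches in from the vntb_pid/vntb_vid configfs
+ * entries:
+ *
+ *	u32 id;
+ *
+ *	pci_read(bus, 0, 0, 4, &id);	id == (vntb_pid << 16) | vntb_vid
+ */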
+
+static struct pci_ops vpci_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+static int vpci_scan_bus(void *sysdata)
+{
+ struct pci_bus *vpci_bus;
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+ if (!vpci_bus) {
+ pr_err("failed to create virtual PCI bus\n");
+ return -EINVAL;
+ }
+
+ pci_bus_add_devices(vpci_bus);
+
+ return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct epf_ntb *ndev = ntb_ndev(ntb);
+
+ return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ int ret;
+ struct device *dev;
+
+ dev = &ntb->ntb.dev;
+ barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ epf_bar = &ntb->epf->bar[barno];
+ epf_bar->phys_addr = addr;
+ epf_bar->barno = barno;
+ epf_bar->size = size;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+ if (ret) {
+ dev_err(dev, "failure set mw trans\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (base)
+ *base = ntb->vpci_mw_phy[idx];
+
+ if (size)
+ *size = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4;
+ u32 val;
+ void __iomem *base = ntb->reg;
+
+ val = readl(base + off + ct + idx * 4);
+ return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset, ct = ctrl->spad_count * 4;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + ct + idx * 4);
+ return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+ u32 val;
+
+ val = readl(base + off + idx * 4);
+ return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + idx * 4);
+ return 0;
+}
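+
+/*
+ * Scratchpad layout implied by the accessors above: within the config BAR,
+ * the peer scratchpads start at spad_offset and the local ones follow at
+ * spad_offset + spad_count * 4, 4 bytes per scratchpad.
+ */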
+
+static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
+{
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ u8 func_no, vfunc_no;
+ int ret;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_raise_irq(ntb->epf->epc,
+ func_no,
+ vfunc_no,
+ PCI_EPC_IRQ_MSI,
+ interrupt_num + 1);
+ if (ret)
+ dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
+
+ return ret;
+}
+
+static u64 vntb_epf_db_read(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->db;
+}
+
+static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->reg->link_status;
+}
+
+static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ ntb->db &= ~db_bits;
+ return 0;
+}
+
+static int vntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ return 0;
+}
+
+static const struct ntb_dev_ops vntb_epf_ops = {
+ .mw_count = vntb_epf_mw_count,
+ .spad_count = vntb_epf_spad_count,
+ .peer_mw_count = vntb_epf_peer_mw_count,
+ .db_valid_mask = vntb_epf_db_valid_mask,
+ .db_set_mask = vntb_epf_db_set_mask,
+ .mw_set_trans = vntb_epf_mw_set_trans,
+ .mw_clear_trans = vntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
+ .link_enable = vntb_epf_link_enable,
+ .spad_read = vntb_epf_spad_read,
+ .spad_write = vntb_epf_spad_write,
+ .peer_spad_read = vntb_epf_peer_spad_read,
+ .peer_spad_write = vntb_epf_peer_spad_write,
+ .peer_db_set = vntb_epf_peer_db_set,
+ .db_read = vntb_epf_db_read,
+ .mw_get_align = vntb_epf_mw_get_align,
+ .link_is_up = vntb_epf_link_is_up,
+ .db_clear_mask = vntb_epf_db_clear_mask,
+ .db_clear = vntb_epf_db_clear,
+ .link_disable = vntb_epf_link_disable,
+};
+
+static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
+ struct device *dev = &pdev->dev;
+
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &vntb_epf_ops;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+}
+
+static struct pci_device_id pci_vntb_table[] = {
+ {
+ PCI_DEVICE(0xffff, 0xffff),
+ },
+ {},
+};
+
+static struct pci_driver vntb_pci_driver = {
+ .name = "pci-vntb",
+ .id_table = pci_vntb_table,
+ .probe = pci_vntb_probe,
+};
+
+/* ============ PCIe EPF Driver Bind ====================*/
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize the endpoint controller associated with the NTB function
+ * device. Invoked when the primary interface is bound to an EPC device.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
+ pci_vntb_table[0].vendor = ntb->vntb_vid;
+ pci_vntb_table[0].device = ntb->vntb_pid;
+
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+ dev_err(dev, "failure register vntb pci driver\n");
+ goto err_bar_alloc;
+ }
+
+ vpci_scan_bus(ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+
+ pci_unregister_driver(&vntb_pci_driver);
+}
+
+/* EPF driver operations */
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when the endpoint function bus detects an NTB
+ * endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ ntb->vbus_number = 0xff;
+ epf_set_drvdata(epf, ntb);
+
+ dev_info(dev, "pci-ep epf driver loaded\n");
+ return 0;
+}
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_vntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_vntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+ kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI, 0);
+ if (!kpcintb_workqueue) {
+ pr_err("Failed to allocate kpcintb workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 462b429ad243..4496a7c5c478 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
+#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,13 +21,6 @@
#include <linux/seq_buf.h>
#include <linux/xarray.h>
-enum pci_p2pdma_map_type {
- PCI_P2PDMA_MAP_UNKNOWN = 0,
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
- PCI_P2PDMA_MAP_BUS_ADDR,
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
@@ -854,6 +848,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
struct pci_dev *client;
struct pci_p2pdma *p2pdma;
+ int dist;
if (!provider->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
@@ -870,74 +865,48 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
type = xa_to_value(xa_load(&p2pdma->map_types,
map_types_idx(client)));
rcu_read_unlock();
- return type;
-}
-static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
- struct device *dev, struct scatterlist *sg, int nents)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
- sg_dma_len(s) = s->length;
- }
+ if (type == PCI_P2PDMA_MAP_UNKNOWN)
+ return calc_map_type_and_dist(provider, client, &dist, true);
- return nents;
+ return type;
}
/**
- * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: elements in the scatterlist
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_map_sg() (if called)
+ * pci_p2pdma_map_segment - map an sg segment determining the mapping type
+ * @state: State structure that should be declared outside of the for_each_sg()
+ * loop and initialized to zero.
+ * @dev: DMA device that's doing the mapping operation
+ * @sg: scatterlist segment to map
*
- * Scatterlists mapped with this function should be unmapped using
- * pci_p2pdma_unmap_sg_attrs().
+ * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
+ * the sg segment is the same for the page_link and the dma_address.
*
- * Returns the number of SG entries mapped or 0 on error.
+ * Attempt to map a single segment in an SGL with the PCI bus address.
+ * The segment must point to a PCI P2PDMA page and thus must be
+ * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
+ *
+ * Returns the type of mapping used and maps the page if the type is
+ * PCI_P2PDMA_MAP_BUS_ADDR.
*/
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
{
- struct pci_p2pdma_pagemap *p2p_pgmap =
- to_p2p_pgmap(sg_page(sg)->pgmap);
-
- switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
- case PCI_P2PDMA_MAP_BUS_ADDR:
- return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
- default:
- WARN_ON_ONCE(1);
- return 0;
+ if (state->pgmap != sg_page(sg)->pgmap) {
+ state->pgmap = sg_page(sg)->pgmap;
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
-}
-EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
-/**
- * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
- * mapped with pci_p2pdma_map_sg()
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: number of elements returned by pci_p2pdma_map_sg()
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
- */
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- enum pci_p2pdma_map_type map_type;
-
- map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);
+ if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
+ sg->dma_address = sg_phys(sg) + state->bus_off;
+ sg_dma_len(sg) = sg->length;
+ sg_dma_mark_bus_address(sg);
+ }
- if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
- dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
+ return state->map;
}
-EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
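+/*
+ * A minimal sketch of the intended call pattern, assuming a dma_map_sg()
+ * style implementation that iterates over the scatterlist:
+ *
+ *	struct pci_p2pdma_map_state p2pdma_state = {};
+ *	struct scatterlist *s;
+ *	int i;
+ *
+ *	for_each_sg(sg, s, nents, i) {
+ *		if (is_pci_p2pdma_page(sg_page(s))) {
+ *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, s)) {
+ *			case PCI_P2PDMA_MAP_BUS_ADDR:
+ *				continue;
+ *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ *				break;
+ *			default:
+ *				goto out_unmap;
+ *			}
+ *		}
+ *		... map the segment through the usual streaming DMA path ...
+ *	}
+ */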
/**
* pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 9884d8b29d3b..c5286b027f00 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2315,7 +2315,7 @@ EXPORT_SYMBOL(pci_alloc_dev);
static bool pci_bus_crs_vendor_id(u32 l)
{
- return (l & 0xffff) == 0x0001;
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
}
static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
index 1925ddc13f00..731c5d8f75c6 100644
--- a/drivers/peci/controller/peci-aspeed.c
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -523,7 +523,7 @@ static int aspeed_peci_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (!priv->irq)
+ if (priv->irq < 0)
return priv->irq;
ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
index 68eb61c65d34..de4a7b3e5966 100644
--- a/drivers/peci/cpu.c
+++ b/drivers/peci/cpu.c
@@ -188,8 +188,6 @@ static void adev_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
- auxiliary_device_uninit(adev);
-
kfree(adev->name);
kfree(adev);
}
@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev)
struct auxiliary_device *adev = _adev;
auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
static int devm_adev_add(struct device *dev, int idx)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 80d8309652a4..b80a9b74662b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -36,7 +36,7 @@
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
+#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
#define CMN_MAX_DIMENSION 12
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 513de1f54e2d..933b96e243b8 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu_devid(irq))
+ if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 2c961839903d..ebca5eab9c9b 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -170,7 +170,6 @@ int riscv_pmu_event_set_period(struct perf_event *event)
left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
- perf_event_update_userpage(event);
return overflow;
}
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..2c20b0de8cb0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
-/**
+/*
* This is just a simple implementation to allow legacy implementations
* compatible with new RISC-V PMU driver framework.
* This driver only allows reading two counters i.e CYCLE & INSTRET.
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 79a3de515ece..8de4ca2fef21 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -41,20 +41,6 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
NULL,
};
-union sbi_pmu_ctr_info {
- unsigned long value;
- struct {
- unsigned long csr:12;
- unsigned long width:6;
-#if __riscv_xlen == 32
- unsigned long reserved:13;
-#else
- unsigned long reserved:45;
-#endif
- unsigned long type:1;
- };
-};
-
/*
* RISC-V doesn't have hetergenous harts yet. This need to be part of
* per_cpu in case of harts with different pmu counters
@@ -294,8 +280,13 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
cflags |= SBI_PMU_CFG_FLAG_SET_UINH;
/* retrieve the available counter index */
+#if defined(CONFIG_32BIT)
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
+ cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
cflags, hwc->event_base, hwc->config, 0);
+#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
hwc->event_base, hwc->config);
@@ -437,8 +428,13 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
+#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
+#else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
+ 1, flag, ival, 0, 0);
+#endif
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
@@ -477,7 +473,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (!pmu_ctr_list)
return -ENOMEM;
- for (i = 0; i <= nctr; i++) {
+ for (i = 0; i < nctr; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
@@ -545,8 +541,14 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
hwc = &event->hw;
max_period = riscv_pmu_ctr_get_width_mask(event);
init_val = local64_read(&hwc->prev_count) & max_period;
+#if defined(CONFIG_32BIT)
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
+ flag, init_val, init_val >> 32, 0);
+#else
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
flag, init_val, 0, 0);
+#endif
+ perf_event_update_userpage(event);
}
ctr_ovf_mask = ctr_ovf_mask >> 1;
idx++;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index bff144c97e66..1cf74b0c42e5 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -311,7 +311,7 @@ config PINCTRL_MICROCHIP_SGPIO
LED controller.
config PINCTRL_OCELOT
- bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
+ tristate "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
depends on HAS_IOMEM
select GPIOLIB
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 4d7548686f39..aaa78a613196 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -632,7 +632,7 @@ struct aspeed_pin_desc {
SIG_EXPR_LIST_ALIAS(pin, sig, group)
/**
- * Similar to the above, but for pins with a dual expressions (DE) and
+ * Similar to the above, but for pins with dual expressions (DE)
* and a single group (SG) of pins.
*
* @pin: The pin the signal will be routed to
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index dad453054776..7857e612a100 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -507,7 +507,7 @@ static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
}
}
-static void bcm2835_gpio_irq_enable(struct irq_data *data)
+static void bcm2835_gpio_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -516,13 +516,15 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
unsigned bank = GPIO_REG_OFFSET(gpio);
unsigned long flags;
+ gpiochip_enable_irq(chip, gpio);
+
raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
set_bit(offset, &pc->enabled_irq_map[bank]);
bcm2835_gpio_irq_config(pc, gpio, true);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
-static void bcm2835_gpio_irq_disable(struct irq_data *data)
+static void bcm2835_gpio_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -537,6 +539,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+
+ gpiochip_disable_irq(chip, gpio);
}
static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
@@ -693,16 +697,15 @@ static int bcm2835_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
return ret;
}
-static struct irq_chip bcm2835_gpio_irq_chip = {
+static const struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
- .irq_enable = bcm2835_gpio_irq_enable,
- .irq_disable = bcm2835_gpio_irq_disable,
.irq_set_type = bcm2835_gpio_irq_set_type,
.irq_ack = bcm2835_gpio_irq_ack,
- .irq_mask = bcm2835_gpio_irq_disable,
- .irq_unmask = bcm2835_gpio_irq_enable,
+ .irq_mask = bcm2835_gpio_irq_mask,
+ .irq_unmask = bcm2835_gpio_irq_unmask,
.irq_set_wake = bcm2835_gpio_irq_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .flags = (IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE),
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1280,7 +1283,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
girq = &pc->gpio_chip.irq;
- girq->chip = &bcm2835_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &bcm2835_gpio_irq_chip);
girq->parent_handler = bcm2835_gpio_irq_handler;
girq->num_parents = BCM2835_NUM_IRQS;
girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ffe39336fcac..9e57f4c62e60 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -126,7 +126,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node)
- if (pctldev->dev->of_node == np) {
+ if (device_match_of_node(pctldev->dev, np)) {
mutex_unlock(&pinctrldev_list_mutex);
return pctldev;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx93.c b/drivers/pinctrl/freescale/pinctrl-imx93.c
index 417e41b37a6f..91b3ee1e6fa9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx93.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx93.c
@@ -247,6 +247,7 @@ static const struct of_device_id imx93_pinctrl_of_match[] = {
{ .compatible = "fsl,imx93-iomuxc", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx93_pinctrl_of_match);
static int imx93_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index e5ec8b8956da..078eec8af4a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -151,6 +151,14 @@ config PINCTRL_LEWISBURG
This pinctrl driver provides an interface that allows configuring
of Intel Lewisburg pins and using them as GPIOs.
+config PINCTRL_METEORLAKE
+ tristate "Intel Meteor Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Meteor Lake pins and using them as GPIOs.
+
config PINCTRL_SUNRISEPOINT
tristate "Intel Sunrisepoint pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 181ffcf34d62..bb87e7bc7b20 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -18,5 +18,6 @@ obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LAKEFIELD) += pinctrl-lakefield.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
+obj-$(CONFIG_PINCTRL_METEORLAKE) += pinctrl-meteorlake.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 31f8f271628c..67db79f38051 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -603,7 +603,7 @@ static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc->groups[selector].name;
+ return vg->soc->groups[selector].grp.name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -613,8 +613,8 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc->groups[selector].pins;
- *num_pins = vg->soc->groups[selector].npins;
+ *pins = vg->soc->groups[selector].grp.pins;
+ *num_pins = vg->soc->groups[selector].grp.npins;
return 0;
}
@@ -662,15 +662,15 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
@@ -692,15 +692,15 @@ static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 26b2a425d201..5c4fd16e5b01 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -627,7 +627,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -635,8 +635,8 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -721,16 +721,16 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&chv_lock, flags);
/* Check first that the pad is not locked */
- for (i = 0; i < grp->npins; i++) {
- if (chv_pad_locked(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (chv_pad_locked(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&chv_lock, flags);
- dev_warn(dev, "unable to set mode for locked pin %u\n", grp->pins[i]);
+ dev_warn(dev, "unable to set mode for locked pin %u\n", grp->grp.pins[i]);
return -EBUSY;
}
}
- for (i = 0; i < grp->npins; i++) {
- int pin = grp->pins[i];
+ for (i = 0; i < grp->grp.npins; i++) {
+ int pin = grp->grp.pins[i];
unsigned int mode;
bool invert_oe;
u32 value;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index fd093e36c3a8..52ecd66ce357 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -279,7 +279,7 @@ static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -287,8 +287,8 @@ static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -391,19 +391,19 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!intel_pad_usable(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!intel_pad_usable(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
+ padcfg0 = intel_get_padcfg(pctrl, grp->grp.pins[i], PADCFG0);
value = readl(padcfg0);
value &= ~PADCFG0_PMODE_MASK;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 710341bb67cc..65628423bf63 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -24,17 +24,12 @@ struct device;
/**
* struct intel_pingroup - Description about group of pins
- * @name: Name of the groups
- * @pins: All pins in this group
- * @npins: Number of pins in this groups
- * @mode: Native mode in which the group is muxed out @pins. Used if @modes
- * is %NULL.
+ * @grp: Generic data of the pin group (name and pins)
+ * @mode: Native mode in which the group is muxed out @pins. Used if @modes is %NULL.
* @modes: If not %NULL this will hold mode for each pin in @pins
*/
struct intel_pingroup {
- const char *name;
- const unsigned int *pins;
- size_t npins;
+ struct pingroup grp;
unsigned short mode;
const unsigned int *modes;
};
@@ -156,15 +151,11 @@ struct intel_community {
* a single integer or an array of integers in which case mode is per
* pin.
*/
-#define PIN_GROUP(n, p, m) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .mode = __builtin_choose_expr( \
- __builtin_constant_p((m)), (m), 0), \
- .modes = __builtin_choose_expr( \
- __builtin_constant_p((m)), NULL, (m)), \
+#define PIN_GROUP(n, p, m) \
+ { \
+ .grp = PINCTRL_PINGROUP((n), (p), ARRAY_SIZE((p))), \
+ .mode = __builtin_choose_expr(__builtin_constant_p((m)), (m), 0), \
+ .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
#define FUNCTION(n, g) \
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 4fb39eb30902..5d1abee30f8f 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -282,7 +282,7 @@ static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- return lg->soc->groups[selector].name;
+ return lg->soc->groups[selector].grp.name;
}
static int lp_get_group_pins(struct pinctrl_dev *pctldev,
@@ -292,8 +292,8 @@ static int lp_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- *pins = lg->soc->groups[selector].pins;
- *num_pins = lg->soc->groups[selector].npins;
+ *pins = lg->soc->groups[selector].grp.pins;
+ *num_pins = lg->soc->groups[selector].grp.npins;
return 0;
}
@@ -366,8 +366,8 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&lg->lock, flags);
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
- void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ for (i = 0; i < grp->grp.npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->grp.pins[i], LP_CONFIG1);
u32 value;
value = ioread32(reg);
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 3ae141e0b421..5e752818adb4 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -520,7 +520,7 @@ static const char *mrfld_get_group_name(struct pinctrl_dev *pctldev,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- return mp->groups[group].name;
+ return mp->groups[group].grp.name;
}
static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -528,8 +528,8 @@ static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- *pins = mp->groups[group].pins;
- *npins = mp->groups[group].npins;
+ *pins = mp->groups[group].grp.pins;
+ *npins = mp->groups[group].grp.npins;
return 0;
}
@@ -604,15 +604,15 @@ static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!mrfld_buf_available(mp, grp->pins[i]))
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!mrfld_buf_available(mp, grp->grp.pins[i]))
return -EBUSY;
}
/* Now enable the mux setting for each pin in the group */
raw_spin_lock_irqsave(&mp->lock, flags);
- for (i = 0; i < grp->npins; i++)
- mrfld_update_bufcfg(mp, grp->pins[i], bits, mask);
+ for (i = 0; i < grp->grp.npins; i++)
+ mrfld_update_bufcfg(mp, grp->grp.pins[i], bits, mask);
raw_spin_unlock_irqrestore(&mp->lock, flags);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
new file mode 100644
index 000000000000..9576dcd1cb29
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Meteor Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2022, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define MTL_PAD_OWN 0x0b0
+#define MTL_PADCFGLOCK 0x110
+#define MTL_HOSTSW_OWN 0x140
+#define MTL_GPI_IS 0x200
+#define MTL_GPI_IE 0x210
+
+#define MTL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define MTL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = MTL_PAD_OWN, \
+ .padcfglock_offset = MTL_PADCFGLOCK, \
+ .hostown_offset = MTL_HOSTSW_OWN, \
+ .is_offset = MTL_GPI_IS, \
+ .ie_offset = MTL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
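+
+/*
+ * As an illustration, MTL_GPP(1, 5, 28, 32) in the tables below describes
+ * the GPP_V pad group: register set 1, pins 5..28 (size 24), GPIO base 32.
+ */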
+
+/* Meteor Lake-P */
+static const struct pinctrl_pin_desc mtlp_pins[] = {
+ /* CPU */
+ PINCTRL_PIN(0, "PECI"),
+ PINCTRL_PIN(1, "UFS_RESET_B"),
+ PINCTRL_PIN(2, "VIDSOUT"),
+ PINCTRL_PIN(3, "VIDSCK"),
+ PINCTRL_PIN(4, "VIDALERT_B"),
+ /* GPP_V */
+ PINCTRL_PIN(5, "BATLOW_B"),
+ PINCTRL_PIN(6, "AC_PRESENT"),
+ PINCTRL_PIN(7, "SOC_WAKE_B"),
+ PINCTRL_PIN(8, "PWRBTN_B"),
+ PINCTRL_PIN(9, "SLP_S3_B"),
+ PINCTRL_PIN(10, "SLP_S4_B"),
+ PINCTRL_PIN(11, "SLP_A_B"),
+ PINCTRL_PIN(12, "GPP_V_7"),
+ PINCTRL_PIN(13, "SUSCLK"),
+ PINCTRL_PIN(14, "SLP_WLAN_B"),
+ PINCTRL_PIN(15, "SLP_S5_B"),
+ PINCTRL_PIN(16, "LANPHYPC"),
+ PINCTRL_PIN(17, "SLP_LAN_B"),
+ PINCTRL_PIN(18, "GPP_V_13"),
+ PINCTRL_PIN(19, "WAKE_B"),
+ PINCTRL_PIN(20, "GPP_V_15"),
+ PINCTRL_PIN(21, "GPP_V_16"),
+ PINCTRL_PIN(22, "GPP_V_17"),
+ PINCTRL_PIN(23, "GPP_V_18"),
+ PINCTRL_PIN(24, "CATERR_B"),
+ PINCTRL_PIN(25, "PROCHOT_B"),
+ PINCTRL_PIN(26, "THERMTRIP_B"),
+ PINCTRL_PIN(27, "DSI_DE_TE_2_GENLOCK_REF"),
+ PINCTRL_PIN(28, "DSI_DE_TE_1_DISP_UTILS"),
+ /* GPP_C */
+ PINCTRL_PIN(29, "SMBCLK"),
+ PINCTRL_PIN(30, "SMBDATA"),
+ PINCTRL_PIN(31, "SMBALERT_B"),
+ PINCTRL_PIN(32, "SML0CLK"),
+ PINCTRL_PIN(33, "SML0DATA"),
+ PINCTRL_PIN(34, "GPP_C_5"),
+ PINCTRL_PIN(35, "GPP_C_6"),
+ PINCTRL_PIN(36, "GPP_C_7"),
+ PINCTRL_PIN(37, "GPP_C_8"),
+ PINCTRL_PIN(38, "GPP_C_9"),
+ PINCTRL_PIN(39, "GPP_C_10"),
+ PINCTRL_PIN(40, "GPP_C_11"),
+ PINCTRL_PIN(41, "GPP_C_12"),
+ PINCTRL_PIN(42, "GPP_C_13"),
+ PINCTRL_PIN(43, "GPP_C_14"),
+ PINCTRL_PIN(44, "GPP_C_15"),
+ PINCTRL_PIN(45, "GPP_C_16"),
+ PINCTRL_PIN(46, "GPP_C_17"),
+ PINCTRL_PIN(47, "GPP_C_18"),
+ PINCTRL_PIN(48, "GPP_C_19"),
+ PINCTRL_PIN(49, "GPP_C_20"),
+ PINCTRL_PIN(50, "GPP_C_21"),
+ PINCTRL_PIN(51, "GPP_C_22"),
+ PINCTRL_PIN(52, "GPP_C_23"),
+ /* GPP_A */
+ PINCTRL_PIN(53, "ESPI_IO_0"),
+ PINCTRL_PIN(54, "ESPI_IO_1"),
+ PINCTRL_PIN(55, "ESPI_IO_2"),
+ PINCTRL_PIN(56, "ESPI_IO_3"),
+ PINCTRL_PIN(57, "ESPI_CS0_B"),
+ PINCTRL_PIN(58, "ESPI_CLK"),
+ PINCTRL_PIN(59, "ESPI_RESET_B"),
+ PINCTRL_PIN(60, "GPP_A_7"),
+ PINCTRL_PIN(61, "GPP_A_8"),
+ PINCTRL_PIN(62, "GPP_A_9"),
+ PINCTRL_PIN(63, "GPP_A_10"),
+ PINCTRL_PIN(64, "GPP_A_11"),
+ PINCTRL_PIN(65, "GPP_A_12"),
+ PINCTRL_PIN(66, "ESPI_CS1_B"),
+ PINCTRL_PIN(67, "ESPI_CS2_B"),
+ PINCTRL_PIN(68, "ESPI_CS3_B"),
+ PINCTRL_PIN(69, "ESPI_ALERT0_B"),
+ PINCTRL_PIN(70, "ESPI_ALERT1_B"),
+ PINCTRL_PIN(71, "ESPI_ALERT2_B"),
+ PINCTRL_PIN(72, "ESPI_ALERT3_B"),
+ PINCTRL_PIN(73, "GPP_A_20"),
+ PINCTRL_PIN(74, "GPP_A_21"),
+ PINCTRL_PIN(75, "GPP_A_22"),
+ PINCTRL_PIN(76, "GPP_A_23"),
+ PINCTRL_PIN(77, "ESPI_CLK_LOOPBK"),
+ /* GPP_E */
+ PINCTRL_PIN(78, "GPP_E_0"),
+ PINCTRL_PIN(79, "GPP_E_1"),
+ PINCTRL_PIN(80, "GPP_E_2"),
+ PINCTRL_PIN(81, "GPP_E_3"),
+ PINCTRL_PIN(82, "GPP_E_4"),
+ PINCTRL_PIN(83, "GPP_E_5"),
+ PINCTRL_PIN(84, "GPP_E_6"),
+ PINCTRL_PIN(85, "GPP_E_7"),
+ PINCTRL_PIN(86, "GPP_E_8"),
+ PINCTRL_PIN(87, "GPP_E_9"),
+ PINCTRL_PIN(88, "GPP_E_10"),
+ PINCTRL_PIN(89, "GPP_E_11"),
+ PINCTRL_PIN(90, "GPP_E_12"),
+ PINCTRL_PIN(91, "GPP_E_13"),
+ PINCTRL_PIN(92, "GPP_E_14"),
+ PINCTRL_PIN(93, "SLP_DRAM_B"),
+ PINCTRL_PIN(94, "GPP_E_16"),
+ PINCTRL_PIN(95, "GPP_E_17"),
+ PINCTRL_PIN(96, "GPP_E_18"),
+ PINCTRL_PIN(97, "GPP_E_19"),
+ PINCTRL_PIN(98, "GPP_E_20"),
+ PINCTRL_PIN(99, "GPP_E_21"),
+ PINCTRL_PIN(100, "DNX_FORCE_RELOAD"),
+ PINCTRL_PIN(101, "GPP_E_23"),
+ PINCTRL_PIN(102, "THC0_GSPI0_CLK_LOOPBK"),
+ /* GPP_H */
+ PINCTRL_PIN(103, "GPP_H_0"),
+ PINCTRL_PIN(104, "GPP_H_1"),
+ PINCTRL_PIN(105, "GPP_H_2"),
+ PINCTRL_PIN(106, "GPP_H_3"),
+ PINCTRL_PIN(107, "GPP_H_4"),
+ PINCTRL_PIN(108, "GPP_H_5"),
+ PINCTRL_PIN(109, "GPP_H_6"),
+ PINCTRL_PIN(110, "GPP_H_7"),
+ PINCTRL_PIN(111, "GPP_H_8"),
+ PINCTRL_PIN(112, "GPP_H_9"),
+ PINCTRL_PIN(113, "GPP_H_10"),
+ PINCTRL_PIN(114, "GPP_H_11"),
+ PINCTRL_PIN(115, "GPP_H_12"),
+ PINCTRL_PIN(116, "CPU_C10_GATE_B"),
+ PINCTRL_PIN(117, "GPP_H_14"),
+ PINCTRL_PIN(118, "GPP_H_15"),
+ PINCTRL_PIN(119, "GPP_H_16"),
+ PINCTRL_PIN(120, "GPP_H_17"),
+ PINCTRL_PIN(121, "GPP_H_18"),
+ PINCTRL_PIN(122, "GPP_H_19"),
+ PINCTRL_PIN(123, "GPP_H_20"),
+ PINCTRL_PIN(124, "GPP_H_21"),
+ PINCTRL_PIN(125, "GPP_H_22"),
+ PINCTRL_PIN(126, "GPP_H_23"),
+ PINCTRL_PIN(127, "LPI3C1_CLK_LOOPBK"),
+ PINCTRL_PIN(128, "I3C0_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(129, "CNV_BRI_DT"),
+ PINCTRL_PIN(130, "CNV_BRI_RSP"),
+ PINCTRL_PIN(131, "CNV_RGI_DT"),
+ PINCTRL_PIN(132, "CNV_RGI_RSP"),
+ PINCTRL_PIN(133, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(134, "CRF_CLKREQ"),
+ PINCTRL_PIN(135, "GPP_F_6"),
+ PINCTRL_PIN(136, "FUSA_DIAGTEST_EN"),
+ PINCTRL_PIN(137, "FUSA_DIAGTEST_MODE"),
+ PINCTRL_PIN(138, "BOOTMPC"),
+ PINCTRL_PIN(139, "GPP_F_10"),
+ PINCTRL_PIN(140, "GPP_F_11"),
+ PINCTRL_PIN(141, "GSXDOUT"),
+ PINCTRL_PIN(142, "GSXSLOAD"),
+ PINCTRL_PIN(143, "GSXDIN"),
+ PINCTRL_PIN(144, "GSXSRESETB"),
+ PINCTRL_PIN(145, "GSXCLK"),
+ PINCTRL_PIN(146, "GMII_MDC_0"),
+ PINCTRL_PIN(147, "GMII_MDIO_0"),
+ PINCTRL_PIN(148, "GPP_F_19"),
+ PINCTRL_PIN(149, "GPP_F_20"),
+ PINCTRL_PIN(150, "GPP_F_21"),
+ PINCTRL_PIN(151, "GPP_F_22"),
+ PINCTRL_PIN(152, "GPP_F_23"),
+ PINCTRL_PIN(153, "THC1_GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(154, "GSPI0A_CLK_LOOPBK"),
+ /* SPI0 */
+ PINCTRL_PIN(155, "SPI0_IO_2"),
+ PINCTRL_PIN(156, "SPI0_IO_3"),
+ PINCTRL_PIN(157, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(158, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(159, "SPI0_TPM_CS_B"),
+ PINCTRL_PIN(160, "SPI0_FLASH_0_CS_B"),
+ PINCTRL_PIN(161, "SPI0_FLASH_1_CS_B"),
+ PINCTRL_PIN(162, "SPI0_CLK"),
+ PINCTRL_PIN(163, "L_BKLTEN"),
+ PINCTRL_PIN(164, "L_BKLTCTL"),
+ PINCTRL_PIN(165, "L_VDDEN"),
+ PINCTRL_PIN(166, "SYS_PWROK"),
+ PINCTRL_PIN(167, "SYS_RESET_B"),
+ PINCTRL_PIN(168, "MLK_RST_B"),
+ PINCTRL_PIN(169, "SPI0_CLK_LOOPBK"),
+ /* vGPIO_3 */
+ PINCTRL_PIN(170, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(171, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(172, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(173, "ESPI_USB_OCB_3"),
+ PINCTRL_PIN(174, "USB_CPU_OCB_0"),
+ PINCTRL_PIN(175, "USB_CPU_OCB_1"),
+ PINCTRL_PIN(176, "USB_CPU_OCB_2"),
+ PINCTRL_PIN(177, "USB_CPU_OCB_3"),
+ PINCTRL_PIN(178, "TS0_IN_INT"),
+ PINCTRL_PIN(179, "TS1_IN_INT"),
+ PINCTRL_PIN(180, "THC0_WOT_INT"),
+ PINCTRL_PIN(181, "THC1_WOT_INT"),
+ PINCTRL_PIN(182, "THC0_WHC_INT"),
+ PINCTRL_PIN(183, "THC1_WHC_INT"),
+ /* GPP_S */
+ PINCTRL_PIN(184, "GPP_S_0"),
+ PINCTRL_PIN(185, "GPP_S_1"),
+ PINCTRL_PIN(186, "GPP_S_2"),
+ PINCTRL_PIN(187, "GPP_S_3"),
+ PINCTRL_PIN(188, "GPP_S_4"),
+ PINCTRL_PIN(189, "GPP_S_5"),
+ PINCTRL_PIN(190, "GPP_S_6"),
+ PINCTRL_PIN(191, "GPP_S_7"),
+ /* JTAG */
+ PINCTRL_PIN(192, "JTAG_MBPB0"),
+ PINCTRL_PIN(193, "JTAG_MBPB1"),
+ PINCTRL_PIN(194, "JTAG_MBPB2"),
+ PINCTRL_PIN(195, "JTAG_MBPB3"),
+ PINCTRL_PIN(196, "JTAG_TDO"),
+ PINCTRL_PIN(197, "PRDY_B"),
+ PINCTRL_PIN(198, "PREQ_B"),
+ PINCTRL_PIN(199, "JTAG_TDI"),
+ PINCTRL_PIN(200, "JTAG_TMS"),
+ PINCTRL_PIN(201, "JTAG_TCK"),
+ PINCTRL_PIN(202, "DBG_PMODE"),
+ PINCTRL_PIN(203, "JTAG_TRST_B"),
+ /* GPP_B */
+ PINCTRL_PIN(204, "ADM_VID_0"),
+ PINCTRL_PIN(205, "ADM_VID_1"),
+ PINCTRL_PIN(206, "GPP_B_2"),
+ PINCTRL_PIN(207, "GPP_B_3"),
+ PINCTRL_PIN(208, "GPP_B_4"),
+ PINCTRL_PIN(209, "GPP_B_5"),
+ PINCTRL_PIN(210, "GPP_B_6"),
+ PINCTRL_PIN(211, "GPP_B_7"),
+ PINCTRL_PIN(212, "GPP_B_8"),
+ PINCTRL_PIN(213, "GPP_B_9"),
+ PINCTRL_PIN(214, "GPP_B_10"),
+ PINCTRL_PIN(215, "GPP_B_11"),
+ PINCTRL_PIN(216, "SLP_S0_B"),
+ PINCTRL_PIN(217, "PLTRST_B"),
+ PINCTRL_PIN(218, "GPP_B_14"),
+ PINCTRL_PIN(219, "GPP_B_15"),
+ PINCTRL_PIN(220, "GPP_B_16"),
+ PINCTRL_PIN(221, "GPP_B_17"),
+ PINCTRL_PIN(222, "GPP_B_18"),
+ PINCTRL_PIN(223, "GPP_B_19"),
+ PINCTRL_PIN(224, "GPP_B_20"),
+ PINCTRL_PIN(225, "GPP_B_21"),
+ PINCTRL_PIN(226, "GPP_B_22"),
+ PINCTRL_PIN(227, "GPP_B_23"),
+ PINCTRL_PIN(228, "ISH_I3C0_CLK_LOOPBK"),
+ /* GPP_D */
+ PINCTRL_PIN(229, "GPP_D_0"),
+ PINCTRL_PIN(230, "GPP_D_1"),
+ PINCTRL_PIN(231, "GPP_D_2"),
+ PINCTRL_PIN(232, "GPP_D_3"),
+ PINCTRL_PIN(233, "GPP_D_4"),
+ PINCTRL_PIN(234, "GPP_D_5"),
+ PINCTRL_PIN(235, "GPP_D_6"),
+ PINCTRL_PIN(236, "GPP_D_7"),
+ PINCTRL_PIN(237, "GPP_D_8"),
+ PINCTRL_PIN(238, "GPP_D_9"),
+ PINCTRL_PIN(239, "HDA_BCLK"),
+ PINCTRL_PIN(240, "HDA_SYNC"),
+ PINCTRL_PIN(241, "HDA_SDO"),
+ PINCTRL_PIN(242, "HDA_SDI_0"),
+ PINCTRL_PIN(243, "GPP_D_14"),
+ PINCTRL_PIN(244, "GPP_D_15"),
+ PINCTRL_PIN(245, "GPP_D_16"),
+ PINCTRL_PIN(246, "HDA_RST_B"),
+ PINCTRL_PIN(247, "GPP_D_18"),
+ PINCTRL_PIN(248, "GPP_D_19"),
+ PINCTRL_PIN(249, "GPP_D_20"),
+ PINCTRL_PIN(250, "UFS_REFCLK"),
+ PINCTRL_PIN(251, "BPKI3C_SDA"),
+ PINCTRL_PIN(252, "BPKI3C_SCL"),
+ PINCTRL_PIN(253, "BOOTHALT_B"),
+ /* vGPIO */
+ PINCTRL_PIN(254, "CNV_BTEN"),
+ PINCTRL_PIN(255, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(256, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(257, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(258, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(259, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(260, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(261, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(262, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(263, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(264, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(265, "vUART0_TXD"),
+ PINCTRL_PIN(266, "vUART0_RXD"),
+ PINCTRL_PIN(267, "vUART0_CTS_B"),
+ PINCTRL_PIN(268, "vUART0_RTS_B"),
+ PINCTRL_PIN(269, "vISH_UART0_TXD"),
+ PINCTRL_PIN(270, "vISH_UART0_RXD"),
+ PINCTRL_PIN(271, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(272, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(273, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(274, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(275, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(276, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(277, "vI2S2_SCLK"),
+ PINCTRL_PIN(278, "vI2S2_SFRM"),
+ PINCTRL_PIN(279, "vI2S2_TXD"),
+ PINCTRL_PIN(280, "vI2S2_RXD"),
+ PINCTRL_PIN(281, "vCNV_BT_I2S_BCLK_2"),
+ PINCTRL_PIN(282, "vCNV_BT_I2S_WS_SYNC_2"),
+ PINCTRL_PIN(283, "vCNV_BT_I2S_SDO_2"),
+ PINCTRL_PIN(284, "vCNV_BT_I2S_SDI_2"),
+ PINCTRL_PIN(285, "vI2S2_SCLK_2"),
+ PINCTRL_PIN(286, "vI2S2_SFRM_2"),
+ PINCTRL_PIN(287, "vI2S2_TXD_2"),
+ PINCTRL_PIN(288, "vI2S2_RXD_2"),
+};
+
+static const struct intel_padgroup mtlp_community0_gpps[] = {
+ MTL_GPP(0, 0, 4, 0), /* CPU */
+ MTL_GPP(1, 5, 28, 32), /* GPP_V */
+ MTL_GPP(2, 29, 52, 64), /* GPP_C */
+};
+
+static const struct intel_padgroup mtlp_community1_gpps[] = {
+ MTL_GPP(0, 53, 77, 96), /* GPP_A */
+ MTL_GPP(1, 78, 102, 128), /* GPP_E */
+};
+
+static const struct intel_padgroup mtlp_community3_gpps[] = {
+ MTL_GPP(0, 103, 128, 160), /* GPP_H */
+ MTL_GPP(1, 129, 154, 192), /* GPP_F */
+ MTL_GPP(2, 155, 169, 224), /* SPI0 */
+ MTL_GPP(3, 170, 183, 256), /* vGPIO_3 */
+};
+
+static const struct intel_padgroup mtlp_community4_gpps[] = {
+ MTL_GPP(0, 184, 191, 288), /* GPP_S */
+ MTL_GPP(1, 192, 203, 320), /* JTAG */
+};
+
+static const struct intel_padgroup mtlp_community5_gpps[] = {
+ MTL_GPP(0, 204, 228, 352), /* GPP_B */
+ MTL_GPP(1, 229, 253, 384), /* GPP_D */
+ MTL_GPP(2, 254, 285, 416), /* vGPIO_0 */
+ MTL_GPP(3, 286, 288, 448), /* vGPIO_1 */
+};
+
+static const struct intel_community mtlp_communities[] = {
+ MTL_COMMUNITY(0, 0, 52, mtlp_community0_gpps),
+ MTL_COMMUNITY(1, 53, 102, mtlp_community1_gpps),
+ MTL_COMMUNITY(2, 103, 183, mtlp_community3_gpps),
+ MTL_COMMUNITY(3, 184, 203, mtlp_community4_gpps),
+ MTL_COMMUNITY(4, 204, 288, mtlp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data mtlp_soc_data = {
+ .pins = mtlp_pins,
+ .npins = ARRAY_SIZE(mtlp_pins),
+ .communities = mtlp_communities,
+ .ncommunities = ARRAY_SIZE(mtlp_communities),
+};
+
+static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
+ { "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops);
+
+static struct platform_driver mtl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "meteorlake-pinctrl",
+ .acpi_match_table = mtl_pinctrl_acpi_match,
+ .pm = &mtl_pinctrl_pm_ops,
+ },
+};
+module_platform_driver(mtl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Meteor Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
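The match table above carries the per-SoC description in its driver_data slot, and the shared probe-by-HID helper is expected to recover it through the generic property API. A minimal sketch, assuming the helper resolves it with device_get_match_data(); the example_mtl_probe() name and the error handling are illustrative, not the driver's code:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int example_mtl_probe(struct platform_device *pdev)
{
	/* resolves the (kernel_ulong_t)&mtlp_soc_data stored above */
	const struct intel_pinctrl_soc_data *soc_data =
		device_get_match_data(&pdev->dev);

	if (!soc_data)
		return -ENODEV;

	/* soc_data->pins and soc_data->npins now describe this PCH */
	return 0;
}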
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index acccde9262ba..78c02b7c81f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1107,24 +1107,10 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0060, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 31, 1),
PIN_FIELD_BASE(152, 152, 7, 0x0090, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x0090, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x0090, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 31, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 31, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0030, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0030, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0030, 0x10, 4, 1),
@@ -1137,12 +1123,6 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0030, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0030, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0030, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 31, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
@@ -1164,24 +1144,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00c0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00c0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00c0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00c0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0040, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0040, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0040, 0x10, 4, 1),
@@ -1194,12 +1160,6 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0040, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0040, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
@@ -1221,24 +1181,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 13, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 11, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 23, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 9, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 21, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 7, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 19, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 5, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 17, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 15, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00d0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00d0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00d0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00d0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 3, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0050, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0050, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0050, 0x10, 4, 1),
@@ -1251,83 +1197,169 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0050, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0050, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0050, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 7, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 5, 1),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1e0en_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 1),
-};
+static const struct mtk_pin_field_calc mt8192_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(89, 89, 2, 0x0040, 0x10, 0, 5),
+ PIN_FIELD_BASE(90, 90, 2, 0x0040, 0x10, 5, 5),
-static const struct mtk_pin_field_calc mt8192_pin_e0_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 19, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 16, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 4, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 13, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 1, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 10, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 28, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 7, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 25, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 4, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 22, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 4, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 4, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 10, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 4, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 7, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 3),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 2, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 20, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 17, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 5, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 14, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 2, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 29, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 8, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 26, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 5, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 23, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 2, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 5, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 5, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 11, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 2, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 5, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 2, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 8, 1),
+static const struct mtk_pin_field_calc mt8192_pin_rsel_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 2),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 2),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 2),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 2),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 2),
};
+static const unsigned int mt8192_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/
+ MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/
+ MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/
+ MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/
+ MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PUPD_R1R0_TYPE,/*13*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*14*/ MTK_PULL_PUPD_R1R0_TYPE,/*15*/
+ MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/
+ MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/
+ MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/
+ MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/
+ MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/
+ MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/
+ MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/
+ MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/
+ MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/
+ MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/
+ MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/
+ MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/
+ MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
+ MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
+ MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/
+ MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/
+ MTK_PULL_PU_PD_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/
+ MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/
+ MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/
+ MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/
+ MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_TYPE,/*99*/
+ MTK_PULL_PU_PD_TYPE,/*100*/ MTK_PULL_PU_PD_TYPE,/*101*/
+ MTK_PULL_PU_PD_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/
+ MTK_PULL_PU_PD_TYPE,/*104*/ MTK_PULL_PU_PD_TYPE,/*105*/
+ MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/
+ MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/
+ MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/
+ MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/
+ MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/
+ MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*118*/ MTK_PULL_PU_PD_RSEL_TYPE,/*119*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*120*/ MTK_PULL_PU_PD_RSEL_TYPE,/*121*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PU_PD_RSEL_TYPE,/*125*/
+ MTK_PULL_PU_PD_TYPE,/*126*/ MTK_PULL_PU_PD_TYPE,/*127*/
+ MTK_PULL_PU_PD_TYPE,/*128*/ MTK_PULL_PU_PD_TYPE,/*129*/
+ MTK_PULL_PU_PD_TYPE,/*130*/ MTK_PULL_PU_PD_TYPE,/*131*/
+ MTK_PULL_PU_PD_TYPE,/*132*/ MTK_PULL_PU_PD_TYPE,/*133*/
+ MTK_PULL_PU_PD_TYPE,/*134*/ MTK_PULL_PU_PD_TYPE,/*135*/
+ MTK_PULL_PU_PD_TYPE,/*136*/ MTK_PULL_PU_PD_TYPE,/*137*/
+ MTK_PULL_PU_PD_TYPE,/*138*/ MTK_PULL_PU_PD_RSEL_TYPE,/*139*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*140*/ MTK_PULL_PU_PD_RSEL_TYPE,/*141*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*142*/ MTK_PULL_PU_PD_TYPE,/*143*/
+ MTK_PULL_PU_PD_TYPE,/*144*/ MTK_PULL_PU_PD_TYPE,/*145*/
+ MTK_PULL_PU_PD_TYPE,/*146*/ MTK_PULL_PU_PD_TYPE,/*147*/
+ MTK_PULL_PU_PD_TYPE,/*148*/ MTK_PULL_PU_PD_TYPE,/*149*/
+ MTK_PULL_PU_PD_TYPE,/*150*/ MTK_PULL_PU_PD_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PU_PD_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/
+ MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*160*/ MTK_PULL_PU_PD_RSEL_TYPE,/*161*/
+ MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/
+ MTK_PULL_PU_PD_TYPE,/*164*/ MTK_PULL_PU_PD_TYPE,/*165*/
+ MTK_PULL_PU_PD_TYPE,/*166*/ MTK_PULL_PU_PD_TYPE,/*167*/
+ MTK_PULL_PU_PD_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/
+ MTK_PULL_PU_PD_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/
+ MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/
+ MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/
+ MTK_PULL_PU_PD_TYPE,/*176*/ MTK_PULL_PU_PD_TYPE,/*177*/
+ MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/
+ MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/
+ MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PUPD_R1R0_TYPE,/*183*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*184*/ MTK_PULL_PUPD_R1R0_TYPE,/*185*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*186*/ MTK_PULL_PUPD_R1R0_TYPE,/*187*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*188*/ MTK_PULL_PUPD_R1R0_TYPE,/*189*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*190*/ MTK_PULL_PUPD_R1R0_TYPE,/*191*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*192*/ MTK_PULL_PUPD_R1R0_TYPE,/*193*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*194*/ MTK_PULL_PU_PD_TYPE,/*195*/
+ MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/
+ MTK_PULL_PU_PD_TYPE,/*198*/ MTK_PULL_PU_PD_TYPE,/*199*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*204*/ MTK_PULL_PU_PD_RSEL_TYPE,/*205*/
+ MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/
+ MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/
+ MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/
+ MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/
+ MTK_PULL_PU_PD_TYPE,/*214*/ MTK_PULL_PU_PD_TYPE,/*215*/
+ MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PU_PD_TYPE,/*217*/
+ MTK_PULL_PU_PD_TYPE,/*218*/ MTK_PULL_PU_PD_TYPE,/*219*/
+};
static const char * const mt8192_pinctrl_register_base_names[] = {
"iocfg0", "iocfg_rm", "iocfg_bm", "iocfg_bl", "iocfg_br",
@@ -1355,9 +1387,8 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8192_pin_pupd_range),
[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8192_pin_r0_range),
[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8192_pin_r1_range),
- [PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8192_pin_e1e0en_range),
- [PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8192_pin_e0_range),
- [PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8192_pin_e1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8192_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8192_pin_rsel_range),
};
static const struct mtk_pin_soc mt8192_data = {
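The consolidation works because each pin's three advanced-drive bits sat at consecutive offsets of the same register in the removed tables: pin 118, for instance, had EN, E0 and E1 at bits 0, 1 and 2 of register 0x0040, which the new table expresses as the single 3-bit field PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 3). A sketch of the equivalent packing, assuming nothing about the mtk helpers beyond the layout the table encodes:

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: fold the old EN (bit n), E0 (bit n+1) and E1
 * (bit n+2) values into the new 3-bit DRV_ADV field at shift n.
 */
static inline u32 example_drv_adv_pack(u32 reg, unsigned int shift,
				       bool en, bool e0, bool e1)
{
	u32 val = (en ? 1 : 0) | (e0 ? 2 : 0) | (e1 ? 4 : 0);

	reg &= ~(GENMASK(2, 0) << shift);
	return reg | (val << shift);
}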
@@ -1367,17 +1398,16 @@ static const struct mtk_pin_soc mt8192_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt8192),
.base_names = mt8192_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt8192_pinctrl_register_base_names),
+ .pull_type = mt8192_pull_type,
.eint_hw = &mt8192_eint_hw,
.nfuncs = 8,
.gpio_m = 0,
.bias_set_combo = mtk_pinconf_bias_set_combo,
.bias_get_combo = mtk_pinconf_bias_get_combo,
- .drive_set = mtk_pinconf_drive_set_raw,
- .drive_get = mtk_pinconf_drive_get_raw,
- .adv_pull_get = mtk_pinconf_adv_pull_get,
- .adv_pull_set = mtk_pinconf_adv_pull_set,
- .adv_drive_get = mtk_pinconf_adv_drive_get,
- .adv_drive_set = mtk_pinconf_adv_drive_set,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
};
static const struct of_device_id mt8192_pinctrl_of_match[] = {
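The new pull_type table is what lets the generic mtk_pinconf_bias_set_combo() pick the right bias scheme per pin instead of assuming one scheme chip-wide. A sketch of the dispatch idea, using the type constants from the table above; the function itself is hypothetical:

static const char *example_bias_scheme(unsigned int pin)
{
	switch (mt8192_pull_type[pin]) {
	case MTK_PULL_PU_PD_TYPE:	/* plain up/down enable bits */
		return "pu/pd";
	case MTK_PULL_PUPD_R1R0_TYPE:	/* direction bit plus R1/R0 strength */
		return "pupd+r1r0";
	case MTK_PULL_PU_PD_RSEL_TYPE:	/* up/down plus RSEL resistor code */
		return "pu/pd+rsel";
	default:
		return "unknown";
	}
}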
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index a1f93859e7ca..8ef0a97d2bf5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -96,10 +96,12 @@ static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_groups; n++) {
if (strcmp(name, pctl->groups[n].name) == 0)
return &pctl->groups[n];
}
+
return NULL;
}
@@ -108,6 +110,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
unsigned long config)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (config == grp->settings[n].val) {
if (!pctl->variant || (pctl->variant &
@@ -115,6 +118,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -123,6 +127,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
const char *name)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (strcmp(name, grp->settings[n].name) == 0) {
if (!pctl->variant || (pctl->variant &
@@ -130,6 +135,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -137,6 +143,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (grp->settings[n].flags &
(MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
@@ -145,6 +152,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -152,10 +160,12 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_functions; n++) {
if (strcmp(name, pctl->functions[n].name) == 0)
return &pctl->functions[n];
}
+
return NULL;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 640e50d94f27..f5014d09d81a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1421,8 +1421,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
has_config = nmk_pinctrl_dt_get_config(np, &configs);
np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
+ if (np_config) {
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ of_node_put(np_config);
+ }
if (has_config) {
const char *gpio_name;
const char *pin;
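The fix above follows the usual OF refcounting rule: of_parse_phandle() returns the target node with an elevated reference count, so every successful lookup must be balanced with of_node_put() once the node has been consumed. The pattern in isolation; example_consume() is a hypothetical stand-in for nmk_pinctrl_dt_get_config():

#include <linux/of.h>

static void example_consume(struct device_node *np_config)
{
	/* hypothetical consumer; safe while the reference is held */
}

static void example_lookup(struct device_node *np)
{
	struct device_node *np_config;

	np_config = of_parse_phandle(np, "ste,config", 0);
	if (np_config) {
		example_consume(np_config);
		of_node_put(np_config);	/* balance of_parse_phandle() */
	}
}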
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 0645c2c24f50..4691a33bc374 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -6,8 +6,6 @@
* Authors: Ken Xue <Ken.Xue@amd.com>
* Wu, Jeff <Jeff.Wu@amd.com>
*
- * Contact Information: Nehal Shah <Nehal-bakulchandra.Shah@amd.com>
- * Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
#include <linux/err.h>
@@ -31,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include "core.h"
#include "pinctrl-utils.h"
@@ -203,8 +202,6 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
bool tmr_out_unit;
- unsigned int time;
- unsigned int unit;
bool tmr_large;
char *level_trig;
@@ -218,13 +215,13 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *pull_up_sel;
char *pull_up_enable;
char *pull_down_enable;
- char *output_value;
- char *output_enable;
+ char *orientation;
char debounce_value[40];
char *debounce_enable;
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
- seq_printf(s, "GPIO bank%d\t", bank);
+ unsigned int time = 0;
+ unsigned int unit = 0;
switch (bank) {
case 0:
@@ -247,8 +244,9 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
/* Illegal bank number, ignore */
continue;
}
+ seq_printf(s, "GPIO bank%d\n", bank);
for (; i < pin_num; i++) {
- seq_printf(s, "pin%d\t", i);
+ seq_printf(s, "📌%d\t", i);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -256,84 +254,91 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
ACTIVE_LEVEL_MASK;
- interrupt_enable = "interrupt is enabled|";
+ interrupt_enable = "+";
if (level == ACTIVE_LEVEL_HIGH)
- active_level = "Active high|";
+ active_level = "↑";
else if (level == ACTIVE_LEVEL_LOW)
- active_level = "Active low|";
+ active_level = "↓";
else if (!(pin_reg & BIT(LEVEL_TRIG_OFF)) &&
level == ACTIVE_LEVEL_BOTH)
- active_level = "Active on both|";
+ active_level = "b";
else
- active_level = "Unknown Active level|";
+ active_level = "?";
if (pin_reg & BIT(LEVEL_TRIG_OFF))
- level_trig = "Level trigger|";
+ level_trig = "level";
else
- level_trig = "Edge trigger|";
+ level_trig = " edge";
} else {
- interrupt_enable =
- "interrupt is disabled|";
- active_level = " ";
- level_trig = " ";
+ interrupt_enable = "∅";
+ active_level = "∅";
+ level_trig = " ∅";
}
if (pin_reg & BIT(INTERRUPT_MASK_OFF))
- interrupt_mask =
- "interrupt is unmasked|";
+ interrupt_mask = "-";
else
- interrupt_mask =
- "interrupt is masked|";
+ interrupt_mask = "+";
+ seq_printf(s, "int %s (🎭 %s)| active-%s| %s-🔫| ",
+ interrupt_enable,
+ interrupt_mask,
+ active_level,
+ level_trig);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
- wake_cntrl0 = "enable wakeup in S0i3 state|";
+ wake_cntrl0 = "+";
else
- wake_cntrl0 = "disable wakeup in S0i3 state|";
+ wake_cntrl0 = "∅";
+ seq_printf(s, "S0i3 🌅 %s| ", wake_cntrl0);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
- wake_cntrl1 = "enable wakeup in S3 state|";
+ wake_cntrl1 = "+";
else
- wake_cntrl1 = "disable wakeup in S3 state|";
+ wake_cntrl1 = "∅";
+ seq_printf(s, "S3 🌅 %s| ", wake_cntrl1);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
- wake_cntrl2 = "enable wakeup in S4/S5 state|";
+ wake_cntrl2 = "+";
else
- wake_cntrl2 = "disable wakeup in S4/S5 state|";
+ wake_cntrl2 = "∅";
+ seq_printf(s, "S4/S5 🌅 %s| ", wake_cntrl2);
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
- pull_up_enable = "pull-up is enabled|";
+ pull_up_enable = "+";
if (pin_reg & BIT(PULL_UP_SEL_OFF))
- pull_up_sel = "8k pull-up|";
+ pull_up_sel = "8k";
else
- pull_up_sel = "4k pull-up|";
+ pull_up_sel = "4k";
} else {
- pull_up_enable = "pull-up is disabled|";
- pull_up_sel = " ";
+ pull_up_enable = "∅";
+ pull_up_sel = " ";
}
+ seq_printf(s, "pull-↑ %s (%s)| ",
+ pull_up_enable,
+ pull_up_sel);
if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF))
- pull_down_enable = "pull-down is enabled|";
+ pull_down_enable = "+";
else
- pull_down_enable = "Pull-down is disabled|";
+ pull_down_enable = "∅";
+ seq_printf(s, "pull-↓ %s| ", pull_down_enable);
if (pin_reg & BIT(OUTPUT_ENABLE_OFF)) {
- pin_sts = " ";
- output_enable = "output is enabled|";
+ pin_sts = "output";
if (pin_reg & BIT(OUTPUT_VALUE_OFF))
- output_value = "output is high|";
+ orientation = "↑";
else
- output_value = "output is low|";
+ orientation = "↓";
} else {
- output_enable = "output is disabled|";
- output_value = " ";
-
+ pin_sts = "input ";
if (pin_reg & BIT(PIN_STS_OFF))
- pin_sts = "input is high|";
+ orientation = "↑";
else
- pin_sts = "input is low|";
+ orientation = "↓";
}
+ seq_printf(s, "%s %s| ", pin_sts, orientation);
db_cntrl = (DB_CNTRl_MASK << DB_CNTRL_OFF) & pin_reg;
if (db_cntrl) {
@@ -352,27 +357,18 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
unit = 61;
}
if ((DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (high and low) enabled|";
+ debounce_enable = "b +";
else if ((DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (low) enabled|";
+ debounce_enable = "↓ +";
else
- debounce_enable = "debouncing filter (high) enabled|";
+ debounce_enable = "↑ +";
- snprintf(debounce_value, sizeof(debounce_value),
- "debouncing timeout is %u (us)|", time * unit);
} else {
- debounce_enable = "debouncing filter disabled|";
- snprintf(debounce_value, sizeof(debounce_value), " ");
+ debounce_enable = " ∅";
}
-
- seq_printf(s, "%s %s %s %s %s %s\n"
- " %s %s %s %s %s %s %s %s %s 0x%x\n",
- level_trig, active_level, interrupt_enable,
- interrupt_mask, wake_cntrl0, wake_cntrl1,
- wake_cntrl2, pin_sts, pull_up_sel,
- pull_up_enable, pull_down_enable,
- output_value, output_enable,
- debounce_enable, debounce_value, pin_reg);
+ snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
+ seq_printf(s, "debounce %s (⏰ %sus)| ", debounce_enable, debounce_value);
+ seq_printf(s, " 0x%x\n", pin_reg);
}
}
}
@@ -917,6 +913,7 @@ static int amd_gpio_suspend(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -925,7 +922,9 @@ static int amd_gpio_suspend(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -935,6 +934,7 @@ static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -943,7 +943,10 @@ static int amd_gpio_resume(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
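Two things change in the suspend/resume paths: the register accesses now run under the driver's raw spinlock, and the interrupt/wake status bits (PIN_IRQ_PENDING) are deliberately excluded from the snapshot, with the live status re-read and merged back on resume, so a stale image never overwrites status the hardware latched while the system slept. Schematically; the example_* names are illustrative and PIN_IRQ_PENDING is the driver's status mask:

#include <linux/io.h>
#include <linux/types.h>

static u32 example_save(void __iomem *reg)
{
	return readl(reg) & ~PIN_IRQ_PENDING;	/* never snapshot status bits */
}

static void example_restore(void __iomem *reg, u32 saved)
{
	/* merge in whatever status was latched while suspended */
	writel(saved | (readl(reg) & PIN_IRQ_PENDING), reg);
}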
@@ -955,14 +958,115 @@ static const struct dev_pm_ops amd_gpio_pm_ops = {
};
#endif
+static int amd_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pmx_functions);
+}
+
+static const char *amd_get_fname(struct pinctrl_dev *pctrldev, unsigned int selector)
+{
+ return pmx_functions[selector].name;
+}
+
+static int amd_get_groups(struct pinctrl_dev *pctrldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (!gpio_dev->iomux_base) {
+ dev_err(&gpio_dev->pdev->dev, "iomux function %d group not supported\n", selector);
+ return -EINVAL;
+ }
+
+ *groups = pmx_functions[selector].groups;
+ *num_groups = pmx_functions[selector].ngroups;
+ return 0;
+}
+
+static int amd_set_mux(struct pinctrl_dev *pctrldev, unsigned int function, unsigned int group)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+ struct device *dev = &gpio_dev->pdev->dev;
+ struct pin_desc *pd;
+ int ind, index;
+
+ if (!gpio_dev->iomux_base)
+ return -EINVAL;
+
+ for (index = 0; index < NSELECTS; index++) {
+ if (strcmp(gpio_dev->groups[group].name, pmx_functions[function].groups[index]))
+ continue;
+
+ if (readb(gpio_dev->iomux_base + pmx_functions[function].index) ==
+ FUNCTION_INVALID) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ writeb(index, gpio_dev->iomux_base + pmx_functions[function].index);
+
+ if (index != (readb(gpio_dev->iomux_base + pmx_functions[function].index) &
+ FUNCTION_MASK)) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ for (ind = 0; ind < gpio_dev->groups[group].npins; ind++) {
+ if (strncmp(gpio_dev->groups[group].name, "IMX_F", strlen("IMX_F")))
+ continue;
+
+ pd = pin_desc_get(gpio_dev->pctrl, gpio_dev->groups[group].pins[ind]);
+ pd->mux_owner = gpio_dev->groups[group].name;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops amd_pmxops = {
+ .get_functions_count = amd_get_functions_count,
+ .get_function_name = amd_get_fname,
+ .get_function_groups = amd_get_groups,
+ .set_mux = amd_set_mux,
+};
+
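amd_set_mux() treats the iomux block defensively: a register that reads back as FUNCTION_INVALID (0xFF) is taken to be unimplemented, and after writing the 2-bit selector it re-reads through FUNCTION_MASK to catch writes the hardware silently ignored. That write-and-verify step in isolation; off stands in for pmx_functions[function].index:

#include <linux/errno.h>
#include <linux/io.h>

static int example_mux_write(void __iomem *iomux_base, unsigned long off, u8 index)
{
	if (readb(iomux_base + off) == FUNCTION_INVALID)
		return -EINVAL;		/* register not implemented */

	writeb(index, iomux_base + off);
	if (index != (readb(iomux_base + off) & FUNCTION_MASK))
		return -EINVAL;		/* write ignored: mux not routed */

	return 0;
}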
static struct pinctrl_desc amd_pinctrl_desc = {
.pins = kerncz_pins,
.npins = ARRAY_SIZE(kerncz_pins),
.pctlops = &amd_pinctrl_ops,
+ .pmxops = &amd_pmxops,
.confops = &amd_pinconf_ops,
.owner = THIS_MODULE,
};
+static void amd_get_iomux_res(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = &amd_pinctrl_desc;
+ struct device *dev = &gpio_dev->pdev->dev;
+ int index;
+
+ index = device_property_match_string(dev, "pinctrl-resource-names", "iomux");
+ if (index < 0) {
+ dev_warn(dev, "failed to get iomux index\n");
+ goto out_no_pinmux;
+ }
+
+ gpio_dev->iomux_base = devm_platform_ioremap_resource(gpio_dev->pdev, index);
+ if (IS_ERR(gpio_dev->iomux_base)) {
+ dev_warn(dev, "Failed to get iomux %d io resource\n", index);
+ goto out_no_pinmux;
+ }
+
+ return;
+
+out_no_pinmux:
+ desc->pmxops = NULL;
+}
+
static int amd_gpio_probe(struct platform_device *pdev)
{
int ret = 0;
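amd_get_iomux_res() keeps the new pinmux support optional: it looks for an "iomux" entry in the pinctrl-resource-names device property, maps the corresponding MMIO resource, and on any failure simply clears pmxops so the device still probes as a plain GPIO/pinconf controller. The discovery pattern on its own; the function name is illustrative, both helpers are standard driver-core APIs:

#include <linux/platform_device.h>
#include <linux/property.h>

static void __iomem *example_map_iomux(struct platform_device *pdev)
{
	int index;

	index = device_property_match_string(&pdev->dev,
					     "pinctrl-resource-names", "iomux");
	if (index < 0)
		return NULL;	/* entry absent: pinmux stays disabled */

	return devm_platform_ioremap_resource(pdev, index); /* ERR_PTR on error */
}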
@@ -977,17 +1081,12 @@ static int amd_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&gpio_dev->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ gpio_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(gpio_dev->base)) {
dev_err(&pdev->dev, "Failed to get gpio io resource.\n");
- return -EINVAL;
+ return PTR_ERR(gpio_dev->base);
}
- gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!gpio_dev->base)
- return -ENOMEM;
-
gpio_dev->irq = platform_get_irq(pdev, 0);
if (gpio_dev->irq < 0)
return gpio_dev->irq;
@@ -1020,6 +1119,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
amd_pinctrl_desc.name = dev_name(&pdev->dev);
+ amd_get_iomux_res(gpio_dev);
gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
gpio_dev);
if (IS_ERR(gpio_dev->pctrl)) {
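The probe conversion replaces the open-coded platform_get_resource() + devm_ioremap() pair with devm_platform_get_and_ioremap_resource(), which also requests the memory region (the old devm_ioremap() path did not) and lets the error path propagate the helper's ERR_PTR code instead of a fixed -EINVAL/-ENOMEM. Minimal usage, mirroring the hunk; example_amd_probe() is illustrative:

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_amd_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* the helper's error, not a fixed code */

	return 0;
}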
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 1d4317073654..c8635998465d 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -74,23 +74,24 @@
#define CLR_INTR_STAT 0x1UL
-struct amd_pingroup {
- const char *name;
- const unsigned *pins;
- unsigned npins;
-};
+#define NSELECTS 0x4
+
+#define FUNCTION_MASK GENMASK(1, 0)
+#define FUNCTION_INVALID GENMASK(7, 0)
struct amd_function {
const char *name;
- const char * const *groups;
+ const char * const groups[NSELECTS];
unsigned ngroups;
+ int index;
};
struct amd_gpio {
raw_spinlock_t lock;
void __iomem *base;
+ void __iomem *iomux_base;
- const struct amd_pingroup *groups;
+ const struct pingroup *groups;
u32 ngroups;
struct pinctrl_dev *pctrl;
struct gpio_chip gc;
@@ -288,45 +289,1332 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(183, "GPIO_183"),
};
-static const unsigned i2c0_pins[] = {145, 146};
-static const unsigned i2c1_pins[] = {147, 148};
-static const unsigned i2c2_pins[] = {113, 114};
-static const unsigned i2c3_pins[] = {19, 20};
+#define AMD_PINS(...) (const unsigned int []){__VA_ARGS__}
+
+enum amd_functions {
+ IMX_F0_GPIO0,
+ IMX_F1_GPIO0,
+ IMX_F2_GPIO0,
+ IMX_F3_GPIO0,
+ IMX_F0_GPIO1,
+ IMX_F1_GPIO1,
+ IMX_F2_GPIO1,
+ IMX_F3_GPIO1,
+ IMX_F0_GPIO2,
+ IMX_F1_GPIO2,
+ IMX_F2_GPIO2,
+ IMX_F3_GPIO2,
+ IMX_F0_GPIO3,
+ IMX_F1_GPIO3,
+ IMX_F2_GPIO3,
+ IMX_F3_GPIO3,
+ IMX_F0_GPIO4,
+ IMX_F1_GPIO4,
+ IMX_F2_GPIO4,
+ IMX_F3_GPIO4,
+ IMX_F0_GPIO5,
+ IMX_F1_GPIO5,
+ IMX_F2_GPIO5,
+ IMX_F3_GPIO5,
+ IMX_F0_GPIO6,
+ IMX_F1_GPIO6,
+ IMX_F2_GPIO6,
+ IMX_F3_GPIO6,
+ IMX_F0_GPIO7,
+ IMX_F1_GPIO7,
+ IMX_F2_GPIO7,
+ IMX_F3_GPIO7,
+ IMX_F0_GPIO8,
+ IMX_F1_GPIO8,
+ IMX_F2_GPIO8,
+ IMX_F3_GPIO8,
+ IMX_F0_GPIO9,
+ IMX_F1_GPIO9,
+ IMX_F2_GPIO9,
+ IMX_F3_GPIO9,
+ IMX_F0_GPIO10,
+ IMX_F1_GPIO10,
+ IMX_F2_GPIO10,
+ IMX_F3_GPIO10,
+ IMX_F0_GPIO11,
+ IMX_F1_GPIO11,
+ IMX_F2_GPIO11,
+ IMX_F3_GPIO11,
+ IMX_F0_GPIO12,
+ IMX_F1_GPIO12,
+ IMX_F2_GPIO12,
+ IMX_F3_GPIO12,
+ IMX_F0_GPIO13,
+ IMX_F1_GPIO13,
+ IMX_F2_GPIO13,
+ IMX_F3_GPIO13,
+ IMX_F0_GPIO14,
+ IMX_F1_GPIO14,
+ IMX_F2_GPIO14,
+ IMX_F3_GPIO14,
+ IMX_F0_GPIO15,
+ IMX_F1_GPIO15,
+ IMX_F2_GPIO15,
+ IMX_F3_GPIO15,
+ IMX_F0_GPIO16,
+ IMX_F1_GPIO16,
+ IMX_F2_GPIO16,
+ IMX_F3_GPIO16,
+ IMX_F0_GPIO17,
+ IMX_F1_GPIO17,
+ IMX_F2_GPIO17,
+ IMX_F3_GPIO17,
+ IMX_F0_GPIO18,
+ IMX_F1_GPIO18,
+ IMX_F2_GPIO18,
+ IMX_F3_GPIO18,
+ IMX_F0_GPIO19,
+ IMX_F1_GPIO19,
+ IMX_F2_GPIO19,
+ IMX_F3_GPIO19,
+ IMX_F0_GPIO20,
+ IMX_F1_GPIO20,
+ IMX_F2_GPIO20,
+ IMX_F3_GPIO20,
+ IMX_F0_GPIO21,
+ IMX_F1_GPIO21,
+ IMX_F2_GPIO21,
+ IMX_F3_GPIO21,
+ IMX_F0_GPIO22,
+ IMX_F1_GPIO22,
+ IMX_F2_GPIO22,
+ IMX_F3_GPIO22,
+ IMX_F0_GPIO23,
+ IMX_F1_GPIO23,
+ IMX_F2_GPIO23,
+ IMX_F3_GPIO23,
+ IMX_F0_GPIO24,
+ IMX_F1_GPIO24,
+ IMX_F2_GPIO24,
+ IMX_F3_GPIO24,
+ IMX_F0_GPIO25,
+ IMX_F1_GPIO25,
+ IMX_F2_GPIO25,
+ IMX_F3_GPIO25,
+ IMX_F0_GPIO26,
+ IMX_F1_GPIO26,
+ IMX_F2_GPIO26,
+ IMX_F3_GPIO26,
+ IMX_F0_GPIO27,
+ IMX_F1_GPIO27,
+ IMX_F2_GPIO27,
+ IMX_F3_GPIO27,
+ IMX_F0_GPIO28,
+ IMX_F1_GPIO28,
+ IMX_F2_GPIO28,
+ IMX_F3_GPIO28,
+ IMX_F0_GPIO29,
+ IMX_F1_GPIO29,
+ IMX_F2_GPIO29,
+ IMX_F3_GPIO29,
+ IMX_F0_GPIO30,
+ IMX_F1_GPIO30,
+ IMX_F2_GPIO30,
+ IMX_F3_GPIO30,
+ IMX_F0_GPIO31,
+ IMX_F1_GPIO31,
+ IMX_F2_GPIO31,
+ IMX_F3_GPIO31,
+ IMX_F0_GPIO32,
+ IMX_F1_GPIO32,
+ IMX_F2_GPIO32,
+ IMX_F3_GPIO32,
+ IMX_F0_GPIO33,
+ IMX_F1_GPIO33,
+ IMX_F2_GPIO33,
+ IMX_F3_GPIO33,
+ IMX_F0_GPIO34,
+ IMX_F1_GPIO34,
+ IMX_F2_GPIO34,
+ IMX_F3_GPIO34,
+ IMX_F0_GPIO35,
+ IMX_F1_GPIO35,
+ IMX_F2_GPIO35,
+ IMX_F3_GPIO35,
+ IMX_F0_GPIO36,
+ IMX_F1_GPIO36,
+ IMX_F2_GPIO36,
+ IMX_F3_GPIO36,
+ IMX_F0_GPIO37,
+ IMX_F1_GPIO37,
+ IMX_F2_GPIO37,
+ IMX_F3_GPIO37,
+ IMX_F0_GPIO38,
+ IMX_F1_GPIO38,
+ IMX_F2_GPIO38,
+ IMX_F3_GPIO38,
+ IMX_F0_GPIO39,
+ IMX_F1_GPIO39,
+ IMX_F2_GPIO39,
+ IMX_F3_GPIO39,
+ IMX_F0_GPIO40,
+ IMX_F1_GPIO40,
+ IMX_F2_GPIO40,
+ IMX_F3_GPIO40,
+ IMX_F0_GPIO41,
+ IMX_F1_GPIO41,
+ IMX_F2_GPIO41,
+ IMX_F3_GPIO41,
+ IMX_F0_GPIO42,
+ IMX_F1_GPIO42,
+ IMX_F2_GPIO42,
+ IMX_F3_GPIO42,
+ IMX_F0_GPIO43,
+ IMX_F1_GPIO43,
+ IMX_F2_GPIO43,
+ IMX_F3_GPIO43,
+ IMX_F0_GPIO44,
+ IMX_F1_GPIO44,
+ IMX_F2_GPIO44,
+ IMX_F3_GPIO44,
+ IMX_F0_GPIO45,
+ IMX_F1_GPIO45,
+ IMX_F2_GPIO45,
+ IMX_F3_GPIO45,
+ IMX_F0_GPIO46,
+ IMX_F1_GPIO46,
+ IMX_F2_GPIO46,
+ IMX_F3_GPIO46,
+ IMX_F0_GPIO47,
+ IMX_F1_GPIO47,
+ IMX_F2_GPIO47,
+ IMX_F3_GPIO47,
+ IMX_F0_GPIO48,
+ IMX_F1_GPIO48,
+ IMX_F2_GPIO48,
+ IMX_F3_GPIO48,
+ IMX_F0_GPIO49,
+ IMX_F1_GPIO49,
+ IMX_F2_GPIO49,
+ IMX_F3_GPIO49,
+ IMX_F0_GPIO50,
+ IMX_F1_GPIO50,
+ IMX_F2_GPIO50,
+ IMX_F3_GPIO50,
+ IMX_F0_GPIO51,
+ IMX_F1_GPIO51,
+ IMX_F2_GPIO51,
+ IMX_F3_GPIO51,
+ IMX_F0_GPIO52,
+ IMX_F1_GPIO52,
+ IMX_F2_GPIO52,
+ IMX_F3_GPIO52,
+ IMX_F0_GPIO53,
+ IMX_F1_GPIO53,
+ IMX_F2_GPIO53,
+ IMX_F3_GPIO53,
+ IMX_F0_GPIO54,
+ IMX_F1_GPIO54,
+ IMX_F2_GPIO54,
+ IMX_F3_GPIO54,
+ IMX_F0_GPIO55,
+ IMX_F1_GPIO55,
+ IMX_F2_GPIO55,
+ IMX_F3_GPIO55,
+ IMX_F0_GPIO56,
+ IMX_F1_GPIO56,
+ IMX_F2_GPIO56,
+ IMX_F3_GPIO56,
+ IMX_F0_GPIO57,
+ IMX_F1_GPIO57,
+ IMX_F2_GPIO57,
+ IMX_F3_GPIO57,
+ IMX_F0_GPIO58,
+ IMX_F1_GPIO58,
+ IMX_F2_GPIO58,
+ IMX_F3_GPIO58,
+ IMX_F0_GPIO59,
+ IMX_F1_GPIO59,
+ IMX_F2_GPIO59,
+ IMX_F3_GPIO59,
+ IMX_F0_GPIO60,
+ IMX_F1_GPIO60,
+ IMX_F2_GPIO60,
+ IMX_F3_GPIO60,
+ IMX_F0_GPIO61,
+ IMX_F1_GPIO61,
+ IMX_F2_GPIO61,
+ IMX_F3_GPIO61,
+ IMX_F0_GPIO62,
+ IMX_F1_GPIO62,
+ IMX_F2_GPIO62,
+ IMX_F3_GPIO62,
+ IMX_F0_GPIO64,
+ IMX_F1_GPIO64,
+ IMX_F2_GPIO64,
+ IMX_F3_GPIO64,
+ IMX_F0_GPIO65,
+ IMX_F1_GPIO65,
+ IMX_F2_GPIO65,
+ IMX_F3_GPIO65,
+ IMX_F0_GPIO66,
+ IMX_F1_GPIO66,
+ IMX_F2_GPIO66,
+ IMX_F3_GPIO66,
+ IMX_F0_GPIO67,
+ IMX_F1_GPIO67,
+ IMX_F2_GPIO67,
+ IMX_F3_GPIO67,
+ IMX_F0_GPIO68,
+ IMX_F1_GPIO68,
+ IMX_F2_GPIO68,
+ IMX_F3_GPIO68,
+ IMX_F0_GPIO69,
+ IMX_F1_GPIO69,
+ IMX_F2_GPIO69,
+ IMX_F3_GPIO69,
+ IMX_F0_GPIO70,
+ IMX_F1_GPIO70,
+ IMX_F2_GPIO70,
+ IMX_F3_GPIO70,
+ IMX_F0_GPIO71,
+ IMX_F1_GPIO71,
+ IMX_F2_GPIO71,
+ IMX_F3_GPIO71,
+ IMX_F0_GPIO72,
+ IMX_F1_GPIO72,
+ IMX_F2_GPIO72,
+ IMX_F3_GPIO72,
+ IMX_F0_GPIO73,
+ IMX_F1_GPIO73,
+ IMX_F2_GPIO73,
+ IMX_F3_GPIO73,
+ IMX_F0_GPIO74,
+ IMX_F1_GPIO74,
+ IMX_F2_GPIO74,
+ IMX_F3_GPIO74,
+ IMX_F0_GPIO75,
+ IMX_F1_GPIO75,
+ IMX_F2_GPIO75,
+ IMX_F3_GPIO75,
+ IMX_F0_GPIO76,
+ IMX_F1_GPIO76,
+ IMX_F2_GPIO76,
+ IMX_F3_GPIO76,
+ IMX_F0_GPIO77,
+ IMX_F1_GPIO77,
+ IMX_F2_GPIO77,
+ IMX_F3_GPIO77,
+ IMX_F0_GPIO78,
+ IMX_F1_GPIO78,
+ IMX_F2_GPIO78,
+ IMX_F3_GPIO78,
+ IMX_F0_GPIO79,
+ IMX_F1_GPIO79,
+ IMX_F2_GPIO79,
+ IMX_F3_GPIO79,
+ IMX_F0_GPIO80,
+ IMX_F1_GPIO80,
+ IMX_F2_GPIO80,
+ IMX_F3_GPIO80,
+ IMX_F0_GPIO81,
+ IMX_F1_GPIO81,
+ IMX_F2_GPIO81,
+ IMX_F3_GPIO81,
+ IMX_F0_GPIO82,
+ IMX_F1_GPIO82,
+ IMX_F2_GPIO82,
+ IMX_F3_GPIO82,
+ IMX_F0_GPIO83,
+ IMX_F1_GPIO83,
+ IMX_F2_GPIO83,
+ IMX_F3_GPIO83,
+ IMX_F0_GPIO84,
+ IMX_F1_GPIO84,
+ IMX_F2_GPIO84,
+ IMX_F3_GPIO84,
+ IMX_F0_GPIO85,
+ IMX_F1_GPIO85,
+ IMX_F2_GPIO85,
+ IMX_F3_GPIO85,
+ IMX_F0_GPIO86,
+ IMX_F1_GPIO86,
+ IMX_F2_GPIO86,
+ IMX_F3_GPIO86,
+ IMX_F0_GPIO87,
+ IMX_F1_GPIO87,
+ IMX_F2_GPIO87,
+ IMX_F3_GPIO87,
+ IMX_F0_GPIO88,
+ IMX_F1_GPIO88,
+ IMX_F2_GPIO88,
+ IMX_F3_GPIO88,
+ IMX_F0_GPIO89,
+ IMX_F1_GPIO89,
+ IMX_F2_GPIO89,
+ IMX_F3_GPIO89,
+ IMX_F0_GPIO90,
+ IMX_F1_GPIO90,
+ IMX_F2_GPIO90,
+ IMX_F3_GPIO90,
+ IMX_F0_GPIO91,
+ IMX_F1_GPIO91,
+ IMX_F2_GPIO91,
+ IMX_F3_GPIO91,
+ IMX_F0_GPIO92,
+ IMX_F1_GPIO92,
+ IMX_F2_GPIO92,
+ IMX_F3_GPIO92,
+ IMX_F0_GPIO93,
+ IMX_F1_GPIO93,
+ IMX_F2_GPIO93,
+ IMX_F3_GPIO93,
+ IMX_F0_GPIO94,
+ IMX_F1_GPIO94,
+ IMX_F2_GPIO94,
+ IMX_F3_GPIO94,
+ IMX_F0_GPIO95,
+ IMX_F1_GPIO95,
+ IMX_F2_GPIO95,
+ IMX_F3_GPIO95,
+ IMX_F0_GPIO96,
+ IMX_F1_GPIO96,
+ IMX_F2_GPIO96,
+ IMX_F3_GPIO96,
+ IMX_F0_GPIO97,
+ IMX_F1_GPIO97,
+ IMX_F2_GPIO97,
+ IMX_F3_GPIO97,
+ IMX_F0_GPIO98,
+ IMX_F1_GPIO98,
+ IMX_F2_GPIO98,
+ IMX_F3_GPIO98,
+ IMX_F0_GPIO99,
+ IMX_F1_GPIO99,
+ IMX_F2_GPIO99,
+ IMX_F3_GPIO99,
+ IMX_F0_GPIO100,
+ IMX_F1_GPIO100,
+ IMX_F2_GPIO100,
+ IMX_F3_GPIO100,
+ IMX_F0_GPIO101,
+ IMX_F1_GPIO101,
+ IMX_F2_GPIO101,
+ IMX_F3_GPIO101,
+ IMX_F0_GPIO102,
+ IMX_F1_GPIO102,
+ IMX_F2_GPIO102,
+ IMX_F3_GPIO102,
+ IMX_F0_GPIO103,
+ IMX_F1_GPIO103,
+ IMX_F2_GPIO103,
+ IMX_F3_GPIO103,
+ IMX_F0_GPIO104,
+ IMX_F1_GPIO104,
+ IMX_F2_GPIO104,
+ IMX_F3_GPIO104,
+ IMX_F0_GPIO105,
+ IMX_F1_GPIO105,
+ IMX_F2_GPIO105,
+ IMX_F3_GPIO105,
+ IMX_F0_GPIO106,
+ IMX_F1_GPIO106,
+ IMX_F2_GPIO106,
+ IMX_F3_GPIO106,
+ IMX_F0_GPIO107,
+ IMX_F1_GPIO107,
+ IMX_F2_GPIO107,
+ IMX_F3_GPIO107,
+ IMX_F0_GPIO108,
+ IMX_F1_GPIO108,
+ IMX_F2_GPIO108,
+ IMX_F3_GPIO108,
+ IMX_F0_GPIO109,
+ IMX_F1_GPIO109,
+ IMX_F2_GPIO109,
+ IMX_F3_GPIO109,
+ IMX_F0_GPIO110,
+ IMX_F1_GPIO110,
+ IMX_F2_GPIO110,
+ IMX_F3_GPIO110,
+ IMX_F0_GPIO111,
+ IMX_F1_GPIO111,
+ IMX_F2_GPIO111,
+ IMX_F3_GPIO111,
+ IMX_F0_GPIO112,
+ IMX_F1_GPIO112,
+ IMX_F2_GPIO112,
+ IMX_F3_GPIO112,
+ IMX_F0_GPIO113,
+ IMX_F1_GPIO113,
+ IMX_F2_GPIO113,
+ IMX_F3_GPIO113,
+ IMX_F0_GPIO114,
+ IMX_F1_GPIO114,
+ IMX_F2_GPIO114,
+ IMX_F3_GPIO114,
+ IMX_F0_GPIO115,
+ IMX_F1_GPIO115,
+ IMX_F2_GPIO115,
+ IMX_F3_GPIO115,
+ IMX_F0_GPIO116,
+ IMX_F1_GPIO116,
+ IMX_F2_GPIO116,
+ IMX_F3_GPIO116,
+ IMX_F0_GPIO117,
+ IMX_F1_GPIO117,
+ IMX_F2_GPIO117,
+ IMX_F3_GPIO117,
+ IMX_F0_GPIO118,
+ IMX_F1_GPIO118,
+ IMX_F2_GPIO118,
+ IMX_F3_GPIO118,
+ IMX_F0_GPIO119,
+ IMX_F1_GPIO119,
+ IMX_F2_GPIO119,
+ IMX_F3_GPIO119,
+ IMX_F0_GPIO120,
+ IMX_F1_GPIO120,
+ IMX_F2_GPIO120,
+ IMX_F3_GPIO120,
+ IMX_F0_GPIO121,
+ IMX_F1_GPIO121,
+ IMX_F2_GPIO121,
+ IMX_F3_GPIO121,
+ IMX_F0_GPIO122,
+ IMX_F1_GPIO122,
+ IMX_F2_GPIO122,
+ IMX_F3_GPIO122,
+ IMX_F0_GPIO123,
+ IMX_F1_GPIO123,
+ IMX_F2_GPIO123,
+ IMX_F3_GPIO123,
+ IMX_F0_GPIO124,
+ IMX_F1_GPIO124,
+ IMX_F2_GPIO124,
+ IMX_F3_GPIO124,
+ IMX_F0_GPIO125,
+ IMX_F1_GPIO125,
+ IMX_F2_GPIO125,
+ IMX_F3_GPIO125,
+ IMX_F0_GPIO126,
+ IMX_F1_GPIO126,
+ IMX_F2_GPIO126,
+ IMX_F3_GPIO126,
+ IMX_F0_GPIO127,
+ IMX_F1_GPIO127,
+ IMX_F2_GPIO127,
+ IMX_F3_GPIO127,
+ IMX_F0_GPIO128,
+ IMX_F1_GPIO128,
+ IMX_F2_GPIO128,
+ IMX_F3_GPIO128,
+ IMX_F0_GPIO129,
+ IMX_F1_GPIO129,
+ IMX_F2_GPIO129,
+ IMX_F3_GPIO129,
+ IMX_F0_GPIO130,
+ IMX_F1_GPIO130,
+ IMX_F2_GPIO130,
+ IMX_F3_GPIO130,
+ IMX_F0_GPIO131,
+ IMX_F1_GPIO131,
+ IMX_F2_GPIO131,
+ IMX_F3_GPIO131,
+ IMX_F0_GPIO132,
+ IMX_F1_GPIO132,
+ IMX_F2_GPIO132,
+ IMX_F3_GPIO132,
+ IMX_F0_GPIO133,
+ IMX_F1_GPIO133,
+ IMX_F2_GPIO133,
+ IMX_F3_GPIO133,
+ IMX_F0_GPIO134,
+ IMX_F1_GPIO134,
+ IMX_F2_GPIO134,
+ IMX_F3_GPIO134,
+ IMX_F0_GPIO135,
+ IMX_F1_GPIO135,
+ IMX_F2_GPIO135,
+ IMX_F3_GPIO135,
+ IMX_F0_GPIO136,
+ IMX_F1_GPIO136,
+ IMX_F2_GPIO136,
+ IMX_F3_GPIO136,
+ IMX_F0_GPIO137,
+ IMX_F1_GPIO137,
+ IMX_F2_GPIO137,
+ IMX_F3_GPIO137,
+ IMX_F0_GPIO138,
+ IMX_F1_GPIO138,
+ IMX_F2_GPIO138,
+ IMX_F3_GPIO138,
+ IMX_F0_GPIO139,
+ IMX_F1_GPIO139,
+ IMX_F2_GPIO139,
+ IMX_F3_GPIO139,
+ IMX_F0_GPIO140,
+ IMX_F1_GPIO140,
+ IMX_F2_GPIO140,
+ IMX_F3_GPIO140,
+ IMX_F0_GPIO141,
+ IMX_F1_GPIO141,
+ IMX_F2_GPIO141,
+ IMX_F3_GPIO141,
+ IMX_F0_GPIO142,
+ IMX_F1_GPIO142,
+ IMX_F2_GPIO142,
+ IMX_F3_GPIO142,
+ IMX_F0_GPIO143,
+ IMX_F1_GPIO143,
+ IMX_F2_GPIO143,
+ IMX_F3_GPIO143,
+ IMX_F0_GPIO144,
+ IMX_F1_GPIO144,
+ IMX_F2_GPIO144,
+ IMX_F3_GPIO144,
+};
+
+#define AMD_PINCTRL_FUNC_GRP(_number, _func) \
+ [IMX_F##_func##_GPIO##_number] = \
+ PINCTRL_PINGROUP("IMX_F"#_func "_GPIO"#_number, AMD_PINS(_number), 1)
+
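AMD_PINS() wraps its arguments in an anonymous compound-literal array, and AMD_PINCTRL_FUNC_GRP(n, f) pairs that array with the matching enum amd_functions slot, so the designated initializers below cannot drift out of step with the enum. As a worked expansion (PINCTRL_PINGROUP is the generic initializer from the pinctrl core headers), AMD_PINCTRL_FUNC_GRP(0, 2) contributes:

[IMX_F2_GPIO0] = PINCTRL_PINGROUP("IMX_F2_GPIO0",
				  (const unsigned int []){0}, 1),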
+static const struct pingroup kerncz_groups[] = {
+ AMD_PINCTRL_FUNC_GRP(0, 0),
+ AMD_PINCTRL_FUNC_GRP(0, 1),
+ AMD_PINCTRL_FUNC_GRP(0, 2),
+ AMD_PINCTRL_FUNC_GRP(0, 3),
+ AMD_PINCTRL_FUNC_GRP(1, 0),
+ AMD_PINCTRL_FUNC_GRP(1, 1),
+ AMD_PINCTRL_FUNC_GRP(1, 2),
+ AMD_PINCTRL_FUNC_GRP(1, 3),
+ AMD_PINCTRL_FUNC_GRP(2, 0),
+ AMD_PINCTRL_FUNC_GRP(2, 1),
+ AMD_PINCTRL_FUNC_GRP(2, 2),
+ AMD_PINCTRL_FUNC_GRP(2, 3),
+ AMD_PINCTRL_FUNC_GRP(3, 0),
+ AMD_PINCTRL_FUNC_GRP(3, 1),
+ AMD_PINCTRL_FUNC_GRP(3, 2),
+ AMD_PINCTRL_FUNC_GRP(3, 3),
+ AMD_PINCTRL_FUNC_GRP(4, 0),
+ AMD_PINCTRL_FUNC_GRP(4, 1),
+ AMD_PINCTRL_FUNC_GRP(4, 2),
+ AMD_PINCTRL_FUNC_GRP(4, 3),
+ AMD_PINCTRL_FUNC_GRP(5, 0),
+ AMD_PINCTRL_FUNC_GRP(5, 1),
+ AMD_PINCTRL_FUNC_GRP(5, 2),
+ AMD_PINCTRL_FUNC_GRP(5, 3),
+ AMD_PINCTRL_FUNC_GRP(6, 0),
+ AMD_PINCTRL_FUNC_GRP(6, 1),
+ AMD_PINCTRL_FUNC_GRP(6, 2),
+ AMD_PINCTRL_FUNC_GRP(6, 3),
+ AMD_PINCTRL_FUNC_GRP(7, 0),
+ AMD_PINCTRL_FUNC_GRP(7, 1),
+ AMD_PINCTRL_FUNC_GRP(7, 2),
+ AMD_PINCTRL_FUNC_GRP(7, 3),
+ AMD_PINCTRL_FUNC_GRP(8, 0),
+ AMD_PINCTRL_FUNC_GRP(8, 1),
+ AMD_PINCTRL_FUNC_GRP(8, 2),
+ AMD_PINCTRL_FUNC_GRP(8, 3),
+ AMD_PINCTRL_FUNC_GRP(9, 0),
+ AMD_PINCTRL_FUNC_GRP(9, 1),
+ AMD_PINCTRL_FUNC_GRP(9, 2),
+ AMD_PINCTRL_FUNC_GRP(9, 3),
+ AMD_PINCTRL_FUNC_GRP(10, 0),
+ AMD_PINCTRL_FUNC_GRP(10, 1),
+ AMD_PINCTRL_FUNC_GRP(10, 2),
+ AMD_PINCTRL_FUNC_GRP(10, 3),
+ AMD_PINCTRL_FUNC_GRP(11, 0),
+ AMD_PINCTRL_FUNC_GRP(11, 1),
+ AMD_PINCTRL_FUNC_GRP(11, 2),
+ AMD_PINCTRL_FUNC_GRP(11, 3),
+ AMD_PINCTRL_FUNC_GRP(12, 0),
+ AMD_PINCTRL_FUNC_GRP(12, 1),
+ AMD_PINCTRL_FUNC_GRP(12, 2),
+ AMD_PINCTRL_FUNC_GRP(12, 3),
+ AMD_PINCTRL_FUNC_GRP(13, 0),
+ AMD_PINCTRL_FUNC_GRP(13, 1),
+ AMD_PINCTRL_FUNC_GRP(13, 2),
+ AMD_PINCTRL_FUNC_GRP(13, 3),
+ AMD_PINCTRL_FUNC_GRP(14, 0),
+ AMD_PINCTRL_FUNC_GRP(14, 1),
+ AMD_PINCTRL_FUNC_GRP(14, 2),
+ AMD_PINCTRL_FUNC_GRP(14, 3),
+ AMD_PINCTRL_FUNC_GRP(15, 0),
+ AMD_PINCTRL_FUNC_GRP(15, 1),
+ AMD_PINCTRL_FUNC_GRP(15, 2),
+ AMD_PINCTRL_FUNC_GRP(15, 3),
+ AMD_PINCTRL_FUNC_GRP(16, 0),
+ AMD_PINCTRL_FUNC_GRP(16, 1),
+ AMD_PINCTRL_FUNC_GRP(16, 2),
+ AMD_PINCTRL_FUNC_GRP(16, 3),
+ AMD_PINCTRL_FUNC_GRP(17, 0),
+ AMD_PINCTRL_FUNC_GRP(17, 1),
+ AMD_PINCTRL_FUNC_GRP(17, 2),
+ AMD_PINCTRL_FUNC_GRP(17, 3),
+ AMD_PINCTRL_FUNC_GRP(18, 0),
+ AMD_PINCTRL_FUNC_GRP(18, 1),
+ AMD_PINCTRL_FUNC_GRP(18, 2),
+ AMD_PINCTRL_FUNC_GRP(18, 3),
+ AMD_PINCTRL_FUNC_GRP(19, 0),
+ AMD_PINCTRL_FUNC_GRP(19, 1),
+ AMD_PINCTRL_FUNC_GRP(19, 2),
+ AMD_PINCTRL_FUNC_GRP(19, 3),
+ AMD_PINCTRL_FUNC_GRP(20, 0),
+ AMD_PINCTRL_FUNC_GRP(20, 1),
+ AMD_PINCTRL_FUNC_GRP(20, 2),
+ AMD_PINCTRL_FUNC_GRP(20, 3),
+ AMD_PINCTRL_FUNC_GRP(21, 0),
+ AMD_PINCTRL_FUNC_GRP(21, 1),
+ AMD_PINCTRL_FUNC_GRP(21, 2),
+ AMD_PINCTRL_FUNC_GRP(21, 3),
+ AMD_PINCTRL_FUNC_GRP(22, 0),
+ AMD_PINCTRL_FUNC_GRP(22, 1),
+ AMD_PINCTRL_FUNC_GRP(22, 2),
+ AMD_PINCTRL_FUNC_GRP(22, 3),
+ AMD_PINCTRL_FUNC_GRP(23, 0),
+ AMD_PINCTRL_FUNC_GRP(23, 1),
+ AMD_PINCTRL_FUNC_GRP(23, 2),
+ AMD_PINCTRL_FUNC_GRP(23, 3),
+ AMD_PINCTRL_FUNC_GRP(24, 0),
+ AMD_PINCTRL_FUNC_GRP(24, 1),
+ AMD_PINCTRL_FUNC_GRP(24, 2),
+ AMD_PINCTRL_FUNC_GRP(24, 3),
+ AMD_PINCTRL_FUNC_GRP(25, 0),
+ AMD_PINCTRL_FUNC_GRP(25, 1),
+ AMD_PINCTRL_FUNC_GRP(25, 2),
+ AMD_PINCTRL_FUNC_GRP(25, 3),
+ AMD_PINCTRL_FUNC_GRP(26, 0),
+ AMD_PINCTRL_FUNC_GRP(26, 1),
+ AMD_PINCTRL_FUNC_GRP(26, 2),
+ AMD_PINCTRL_FUNC_GRP(26, 3),
+ AMD_PINCTRL_FUNC_GRP(27, 0),
+ AMD_PINCTRL_FUNC_GRP(27, 1),
+ AMD_PINCTRL_FUNC_GRP(27, 2),
+ AMD_PINCTRL_FUNC_GRP(27, 3),
+ AMD_PINCTRL_FUNC_GRP(28, 0),
+ AMD_PINCTRL_FUNC_GRP(28, 1),
+ AMD_PINCTRL_FUNC_GRP(28, 2),
+ AMD_PINCTRL_FUNC_GRP(28, 3),
+ AMD_PINCTRL_FUNC_GRP(29, 0),
+ AMD_PINCTRL_FUNC_GRP(29, 1),
+ AMD_PINCTRL_FUNC_GRP(29, 2),
+ AMD_PINCTRL_FUNC_GRP(29, 3),
+ AMD_PINCTRL_FUNC_GRP(30, 0),
+ AMD_PINCTRL_FUNC_GRP(30, 1),
+ AMD_PINCTRL_FUNC_GRP(30, 2),
+ AMD_PINCTRL_FUNC_GRP(30, 3),
+ AMD_PINCTRL_FUNC_GRP(31, 0),
+ AMD_PINCTRL_FUNC_GRP(31, 1),
+ AMD_PINCTRL_FUNC_GRP(31, 2),
+ AMD_PINCTRL_FUNC_GRP(31, 3),
+ AMD_PINCTRL_FUNC_GRP(32, 0),
+ AMD_PINCTRL_FUNC_GRP(32, 1),
+ AMD_PINCTRL_FUNC_GRP(32, 2),
+ AMD_PINCTRL_FUNC_GRP(32, 3),
+ AMD_PINCTRL_FUNC_GRP(33, 0),
+ AMD_PINCTRL_FUNC_GRP(33, 1),
+ AMD_PINCTRL_FUNC_GRP(33, 2),
+ AMD_PINCTRL_FUNC_GRP(33, 3),
+ AMD_PINCTRL_FUNC_GRP(34, 0),
+ AMD_PINCTRL_FUNC_GRP(34, 1),
+ AMD_PINCTRL_FUNC_GRP(34, 2),
+ AMD_PINCTRL_FUNC_GRP(34, 3),
+ AMD_PINCTRL_FUNC_GRP(35, 0),
+ AMD_PINCTRL_FUNC_GRP(35, 1),
+ AMD_PINCTRL_FUNC_GRP(35, 2),
+ AMD_PINCTRL_FUNC_GRP(35, 3),
+ AMD_PINCTRL_FUNC_GRP(36, 0),
+ AMD_PINCTRL_FUNC_GRP(36, 1),
+ AMD_PINCTRL_FUNC_GRP(36, 2),
+ AMD_PINCTRL_FUNC_GRP(36, 3),
+ AMD_PINCTRL_FUNC_GRP(37, 0),
+ AMD_PINCTRL_FUNC_GRP(37, 1),
+ AMD_PINCTRL_FUNC_GRP(37, 2),
+ AMD_PINCTRL_FUNC_GRP(37, 3),
+ AMD_PINCTRL_FUNC_GRP(38, 0),
+ AMD_PINCTRL_FUNC_GRP(38, 1),
+ AMD_PINCTRL_FUNC_GRP(38, 2),
+ AMD_PINCTRL_FUNC_GRP(38, 3),
+ AMD_PINCTRL_FUNC_GRP(39, 0),
+ AMD_PINCTRL_FUNC_GRP(39, 1),
+ AMD_PINCTRL_FUNC_GRP(39, 2),
+ AMD_PINCTRL_FUNC_GRP(39, 3),
+ AMD_PINCTRL_FUNC_GRP(40, 0),
+ AMD_PINCTRL_FUNC_GRP(40, 1),
+ AMD_PINCTRL_FUNC_GRP(40, 2),
+ AMD_PINCTRL_FUNC_GRP(40, 3),
+ AMD_PINCTRL_FUNC_GRP(41, 0),
+ AMD_PINCTRL_FUNC_GRP(41, 1),
+ AMD_PINCTRL_FUNC_GRP(41, 2),
+ AMD_PINCTRL_FUNC_GRP(41, 3),
+ AMD_PINCTRL_FUNC_GRP(42, 0),
+ AMD_PINCTRL_FUNC_GRP(42, 1),
+ AMD_PINCTRL_FUNC_GRP(42, 2),
+ AMD_PINCTRL_FUNC_GRP(42, 3),
+ AMD_PINCTRL_FUNC_GRP(43, 0),
+ AMD_PINCTRL_FUNC_GRP(43, 1),
+ AMD_PINCTRL_FUNC_GRP(43, 2),
+ AMD_PINCTRL_FUNC_GRP(43, 3),
+ AMD_PINCTRL_FUNC_GRP(44, 0),
+ AMD_PINCTRL_FUNC_GRP(44, 1),
+ AMD_PINCTRL_FUNC_GRP(44, 2),
+ AMD_PINCTRL_FUNC_GRP(44, 3),
+ AMD_PINCTRL_FUNC_GRP(45, 0),
+ AMD_PINCTRL_FUNC_GRP(45, 1),
+ AMD_PINCTRL_FUNC_GRP(45, 2),
+ AMD_PINCTRL_FUNC_GRP(45, 3),
+ AMD_PINCTRL_FUNC_GRP(46, 0),
+ AMD_PINCTRL_FUNC_GRP(46, 1),
+ AMD_PINCTRL_FUNC_GRP(46, 2),
+ AMD_PINCTRL_FUNC_GRP(46, 3),
+ AMD_PINCTRL_FUNC_GRP(47, 0),
+ AMD_PINCTRL_FUNC_GRP(47, 1),
+ AMD_PINCTRL_FUNC_GRP(47, 2),
+ AMD_PINCTRL_FUNC_GRP(47, 3),
+ AMD_PINCTRL_FUNC_GRP(48, 0),
+ AMD_PINCTRL_FUNC_GRP(48, 1),
+ AMD_PINCTRL_FUNC_GRP(48, 2),
+ AMD_PINCTRL_FUNC_GRP(48, 3),
+ AMD_PINCTRL_FUNC_GRP(49, 0),
+ AMD_PINCTRL_FUNC_GRP(49, 1),
+ AMD_PINCTRL_FUNC_GRP(49, 2),
+ AMD_PINCTRL_FUNC_GRP(49, 3),
+ AMD_PINCTRL_FUNC_GRP(50, 0),
+ AMD_PINCTRL_FUNC_GRP(50, 1),
+ AMD_PINCTRL_FUNC_GRP(50, 2),
+ AMD_PINCTRL_FUNC_GRP(50, 3),
+ AMD_PINCTRL_FUNC_GRP(51, 0),
+ AMD_PINCTRL_FUNC_GRP(51, 1),
+ AMD_PINCTRL_FUNC_GRP(51, 2),
+ AMD_PINCTRL_FUNC_GRP(51, 3),
+ AMD_PINCTRL_FUNC_GRP(52, 0),
+ AMD_PINCTRL_FUNC_GRP(52, 1),
+ AMD_PINCTRL_FUNC_GRP(52, 2),
+ AMD_PINCTRL_FUNC_GRP(52, 3),
+ AMD_PINCTRL_FUNC_GRP(53, 0),
+ AMD_PINCTRL_FUNC_GRP(53, 1),
+ AMD_PINCTRL_FUNC_GRP(53, 2),
+ AMD_PINCTRL_FUNC_GRP(53, 3),
+ AMD_PINCTRL_FUNC_GRP(54, 0),
+ AMD_PINCTRL_FUNC_GRP(54, 1),
+ AMD_PINCTRL_FUNC_GRP(54, 2),
+ AMD_PINCTRL_FUNC_GRP(54, 3),
+ AMD_PINCTRL_FUNC_GRP(55, 0),
+ AMD_PINCTRL_FUNC_GRP(55, 1),
+ AMD_PINCTRL_FUNC_GRP(55, 2),
+ AMD_PINCTRL_FUNC_GRP(55, 3),
+ AMD_PINCTRL_FUNC_GRP(56, 0),
+ AMD_PINCTRL_FUNC_GRP(56, 1),
+ AMD_PINCTRL_FUNC_GRP(56, 2),
+ AMD_PINCTRL_FUNC_GRP(56, 3),
+ AMD_PINCTRL_FUNC_GRP(57, 0),
+ AMD_PINCTRL_FUNC_GRP(57, 1),
+ AMD_PINCTRL_FUNC_GRP(57, 2),
+ AMD_PINCTRL_FUNC_GRP(57, 3),
+ AMD_PINCTRL_FUNC_GRP(58, 0),
+ AMD_PINCTRL_FUNC_GRP(58, 1),
+ AMD_PINCTRL_FUNC_GRP(58, 2),
+ AMD_PINCTRL_FUNC_GRP(58, 3),
+ AMD_PINCTRL_FUNC_GRP(59, 0),
+ AMD_PINCTRL_FUNC_GRP(59, 1),
+ AMD_PINCTRL_FUNC_GRP(59, 2),
+ AMD_PINCTRL_FUNC_GRP(59, 3),
+ AMD_PINCTRL_FUNC_GRP(60, 0),
+ AMD_PINCTRL_FUNC_GRP(60, 1),
+ AMD_PINCTRL_FUNC_GRP(60, 2),
+ AMD_PINCTRL_FUNC_GRP(60, 3),
+ AMD_PINCTRL_FUNC_GRP(61, 0),
+ AMD_PINCTRL_FUNC_GRP(61, 1),
+ AMD_PINCTRL_FUNC_GRP(61, 2),
+ AMD_PINCTRL_FUNC_GRP(61, 3),
+ AMD_PINCTRL_FUNC_GRP(62, 0),
+ AMD_PINCTRL_FUNC_GRP(62, 1),
+ AMD_PINCTRL_FUNC_GRP(62, 2),
+ AMD_PINCTRL_FUNC_GRP(62, 3),
+ AMD_PINCTRL_FUNC_GRP(64, 0),
+ AMD_PINCTRL_FUNC_GRP(64, 1),
+ AMD_PINCTRL_FUNC_GRP(64, 2),
+ AMD_PINCTRL_FUNC_GRP(64, 3),
+ AMD_PINCTRL_FUNC_GRP(65, 0),
+ AMD_PINCTRL_FUNC_GRP(65, 1),
+ AMD_PINCTRL_FUNC_GRP(65, 2),
+ AMD_PINCTRL_FUNC_GRP(65, 3),
+ AMD_PINCTRL_FUNC_GRP(66, 0),
+ AMD_PINCTRL_FUNC_GRP(66, 1),
+ AMD_PINCTRL_FUNC_GRP(66, 2),
+ AMD_PINCTRL_FUNC_GRP(66, 3),
+ AMD_PINCTRL_FUNC_GRP(67, 0),
+ AMD_PINCTRL_FUNC_GRP(67, 1),
+ AMD_PINCTRL_FUNC_GRP(67, 2),
+ AMD_PINCTRL_FUNC_GRP(67, 3),
+ AMD_PINCTRL_FUNC_GRP(68, 0),
+ AMD_PINCTRL_FUNC_GRP(68, 1),
+ AMD_PINCTRL_FUNC_GRP(68, 2),
+ AMD_PINCTRL_FUNC_GRP(68, 3),
+ AMD_PINCTRL_FUNC_GRP(69, 0),
+ AMD_PINCTRL_FUNC_GRP(69, 1),
+ AMD_PINCTRL_FUNC_GRP(69, 2),
+ AMD_PINCTRL_FUNC_GRP(69, 3),
+ AMD_PINCTRL_FUNC_GRP(70, 0),
+ AMD_PINCTRL_FUNC_GRP(70, 1),
+ AMD_PINCTRL_FUNC_GRP(70, 2),
+ AMD_PINCTRL_FUNC_GRP(70, 3),
+ AMD_PINCTRL_FUNC_GRP(71, 0),
+ AMD_PINCTRL_FUNC_GRP(71, 1),
+ AMD_PINCTRL_FUNC_GRP(71, 2),
+ AMD_PINCTRL_FUNC_GRP(71, 3),
+ AMD_PINCTRL_FUNC_GRP(72, 0),
+ AMD_PINCTRL_FUNC_GRP(72, 1),
+ AMD_PINCTRL_FUNC_GRP(72, 2),
+ AMD_PINCTRL_FUNC_GRP(72, 3),
+ AMD_PINCTRL_FUNC_GRP(73, 0),
+ AMD_PINCTRL_FUNC_GRP(73, 1),
+ AMD_PINCTRL_FUNC_GRP(73, 2),
+ AMD_PINCTRL_FUNC_GRP(73, 3),
+ AMD_PINCTRL_FUNC_GRP(74, 0),
+ AMD_PINCTRL_FUNC_GRP(74, 1),
+ AMD_PINCTRL_FUNC_GRP(74, 2),
+ AMD_PINCTRL_FUNC_GRP(74, 3),
+ AMD_PINCTRL_FUNC_GRP(75, 0),
+ AMD_PINCTRL_FUNC_GRP(75, 1),
+ AMD_PINCTRL_FUNC_GRP(75, 2),
+ AMD_PINCTRL_FUNC_GRP(75, 3),
+ AMD_PINCTRL_FUNC_GRP(76, 0),
+ AMD_PINCTRL_FUNC_GRP(76, 1),
+ AMD_PINCTRL_FUNC_GRP(76, 2),
+ AMD_PINCTRL_FUNC_GRP(76, 3),
+ AMD_PINCTRL_FUNC_GRP(77, 0),
+ AMD_PINCTRL_FUNC_GRP(77, 1),
+ AMD_PINCTRL_FUNC_GRP(77, 2),
+ AMD_PINCTRL_FUNC_GRP(77, 3),
+ AMD_PINCTRL_FUNC_GRP(78, 0),
+ AMD_PINCTRL_FUNC_GRP(78, 1),
+ AMD_PINCTRL_FUNC_GRP(78, 2),
+ AMD_PINCTRL_FUNC_GRP(78, 3),
+ AMD_PINCTRL_FUNC_GRP(79, 0),
+ AMD_PINCTRL_FUNC_GRP(79, 1),
+ AMD_PINCTRL_FUNC_GRP(79, 2),
+ AMD_PINCTRL_FUNC_GRP(79, 3),
+ AMD_PINCTRL_FUNC_GRP(80, 0),
+ AMD_PINCTRL_FUNC_GRP(80, 1),
+ AMD_PINCTRL_FUNC_GRP(80, 2),
+ AMD_PINCTRL_FUNC_GRP(80, 3),
+ AMD_PINCTRL_FUNC_GRP(81, 0),
+ AMD_PINCTRL_FUNC_GRP(81, 1),
+ AMD_PINCTRL_FUNC_GRP(81, 2),
+ AMD_PINCTRL_FUNC_GRP(81, 3),
+ AMD_PINCTRL_FUNC_GRP(82, 0),
+ AMD_PINCTRL_FUNC_GRP(82, 1),
+ AMD_PINCTRL_FUNC_GRP(82, 2),
+ AMD_PINCTRL_FUNC_GRP(82, 3),
+ AMD_PINCTRL_FUNC_GRP(83, 0),
+ AMD_PINCTRL_FUNC_GRP(83, 1),
+ AMD_PINCTRL_FUNC_GRP(83, 2),
+ AMD_PINCTRL_FUNC_GRP(83, 3),
+ AMD_PINCTRL_FUNC_GRP(84, 0),
+ AMD_PINCTRL_FUNC_GRP(84, 1),
+ AMD_PINCTRL_FUNC_GRP(84, 2),
+ AMD_PINCTRL_FUNC_GRP(84, 3),
+ AMD_PINCTRL_FUNC_GRP(85, 0),
+ AMD_PINCTRL_FUNC_GRP(85, 1),
+ AMD_PINCTRL_FUNC_GRP(85, 2),
+ AMD_PINCTRL_FUNC_GRP(85, 3),
+ AMD_PINCTRL_FUNC_GRP(86, 0),
+ AMD_PINCTRL_FUNC_GRP(86, 1),
+ AMD_PINCTRL_FUNC_GRP(86, 2),
+ AMD_PINCTRL_FUNC_GRP(86, 3),
+ AMD_PINCTRL_FUNC_GRP(87, 0),
+ AMD_PINCTRL_FUNC_GRP(87, 1),
+ AMD_PINCTRL_FUNC_GRP(87, 2),
+ AMD_PINCTRL_FUNC_GRP(87, 3),
+ AMD_PINCTRL_FUNC_GRP(88, 0),
+ AMD_PINCTRL_FUNC_GRP(88, 1),
+ AMD_PINCTRL_FUNC_GRP(88, 2),
+ AMD_PINCTRL_FUNC_GRP(88, 3),
+ AMD_PINCTRL_FUNC_GRP(89, 0),
+ AMD_PINCTRL_FUNC_GRP(89, 1),
+ AMD_PINCTRL_FUNC_GRP(89, 2),
+ AMD_PINCTRL_FUNC_GRP(89, 3),
+ AMD_PINCTRL_FUNC_GRP(90, 0),
+ AMD_PINCTRL_FUNC_GRP(90, 1),
+ AMD_PINCTRL_FUNC_GRP(90, 2),
+ AMD_PINCTRL_FUNC_GRP(90, 3),
+ AMD_PINCTRL_FUNC_GRP(91, 0),
+ AMD_PINCTRL_FUNC_GRP(91, 1),
+ AMD_PINCTRL_FUNC_GRP(91, 2),
+ AMD_PINCTRL_FUNC_GRP(91, 3),
+ AMD_PINCTRL_FUNC_GRP(92, 0),
+ AMD_PINCTRL_FUNC_GRP(92, 1),
+ AMD_PINCTRL_FUNC_GRP(92, 2),
+ AMD_PINCTRL_FUNC_GRP(92, 3),
+ AMD_PINCTRL_FUNC_GRP(93, 0),
+ AMD_PINCTRL_FUNC_GRP(93, 1),
+ AMD_PINCTRL_FUNC_GRP(93, 2),
+ AMD_PINCTRL_FUNC_GRP(93, 3),
+ AMD_PINCTRL_FUNC_GRP(94, 0),
+ AMD_PINCTRL_FUNC_GRP(94, 1),
+ AMD_PINCTRL_FUNC_GRP(94, 2),
+ AMD_PINCTRL_FUNC_GRP(94, 3),
+ AMD_PINCTRL_FUNC_GRP(95, 0),
+ AMD_PINCTRL_FUNC_GRP(95, 1),
+ AMD_PINCTRL_FUNC_GRP(95, 2),
+ AMD_PINCTRL_FUNC_GRP(95, 3),
+ AMD_PINCTRL_FUNC_GRP(96, 0),
+ AMD_PINCTRL_FUNC_GRP(96, 1),
+ AMD_PINCTRL_FUNC_GRP(96, 2),
+ AMD_PINCTRL_FUNC_GRP(96, 3),
+ AMD_PINCTRL_FUNC_GRP(97, 0),
+ AMD_PINCTRL_FUNC_GRP(97, 1),
+ AMD_PINCTRL_FUNC_GRP(97, 2),
+ AMD_PINCTRL_FUNC_GRP(97, 3),
+ AMD_PINCTRL_FUNC_GRP(98, 0),
+ AMD_PINCTRL_FUNC_GRP(98, 1),
+ AMD_PINCTRL_FUNC_GRP(98, 2),
+ AMD_PINCTRL_FUNC_GRP(98, 3),
+ AMD_PINCTRL_FUNC_GRP(99, 0),
+ AMD_PINCTRL_FUNC_GRP(99, 1),
+ AMD_PINCTRL_FUNC_GRP(99, 2),
+ AMD_PINCTRL_FUNC_GRP(99, 3),
+ AMD_PINCTRL_FUNC_GRP(100, 0),
+ AMD_PINCTRL_FUNC_GRP(100, 1),
+ AMD_PINCTRL_FUNC_GRP(100, 2),
+ AMD_PINCTRL_FUNC_GRP(100, 3),
+ AMD_PINCTRL_FUNC_GRP(101, 0),
+ AMD_PINCTRL_FUNC_GRP(101, 1),
+ AMD_PINCTRL_FUNC_GRP(101, 2),
+ AMD_PINCTRL_FUNC_GRP(101, 3),
+ AMD_PINCTRL_FUNC_GRP(102, 0),
+ AMD_PINCTRL_FUNC_GRP(102, 1),
+ AMD_PINCTRL_FUNC_GRP(102, 2),
+ AMD_PINCTRL_FUNC_GRP(102, 3),
+ AMD_PINCTRL_FUNC_GRP(103, 0),
+ AMD_PINCTRL_FUNC_GRP(103, 1),
+ AMD_PINCTRL_FUNC_GRP(103, 2),
+ AMD_PINCTRL_FUNC_GRP(103, 3),
+ AMD_PINCTRL_FUNC_GRP(104, 0),
+ AMD_PINCTRL_FUNC_GRP(104, 1),
+ AMD_PINCTRL_FUNC_GRP(104, 2),
+ AMD_PINCTRL_FUNC_GRP(104, 3),
+ AMD_PINCTRL_FUNC_GRP(105, 0),
+ AMD_PINCTRL_FUNC_GRP(105, 1),
+ AMD_PINCTRL_FUNC_GRP(105, 2),
+ AMD_PINCTRL_FUNC_GRP(105, 3),
+ AMD_PINCTRL_FUNC_GRP(106, 0),
+ AMD_PINCTRL_FUNC_GRP(106, 1),
+ AMD_PINCTRL_FUNC_GRP(106, 2),
+ AMD_PINCTRL_FUNC_GRP(106, 3),
+ AMD_PINCTRL_FUNC_GRP(107, 0),
+ AMD_PINCTRL_FUNC_GRP(107, 1),
+ AMD_PINCTRL_FUNC_GRP(107, 2),
+ AMD_PINCTRL_FUNC_GRP(107, 3),
+ AMD_PINCTRL_FUNC_GRP(108, 0),
+ AMD_PINCTRL_FUNC_GRP(108, 1),
+ AMD_PINCTRL_FUNC_GRP(108, 2),
+ AMD_PINCTRL_FUNC_GRP(108, 3),
+ AMD_PINCTRL_FUNC_GRP(109, 0),
+ AMD_PINCTRL_FUNC_GRP(109, 1),
+ AMD_PINCTRL_FUNC_GRP(109, 2),
+ AMD_PINCTRL_FUNC_GRP(109, 3),
+ AMD_PINCTRL_FUNC_GRP(110, 0),
+ AMD_PINCTRL_FUNC_GRP(110, 1),
+ AMD_PINCTRL_FUNC_GRP(110, 2),
+ AMD_PINCTRL_FUNC_GRP(110, 3),
+ AMD_PINCTRL_FUNC_GRP(111, 0),
+ AMD_PINCTRL_FUNC_GRP(111, 1),
+ AMD_PINCTRL_FUNC_GRP(111, 2),
+ AMD_PINCTRL_FUNC_GRP(111, 3),
+ AMD_PINCTRL_FUNC_GRP(112, 0),
+ AMD_PINCTRL_FUNC_GRP(112, 1),
+ AMD_PINCTRL_FUNC_GRP(112, 2),
+ AMD_PINCTRL_FUNC_GRP(112, 3),
+ AMD_PINCTRL_FUNC_GRP(113, 0),
+ AMD_PINCTRL_FUNC_GRP(113, 1),
+ AMD_PINCTRL_FUNC_GRP(113, 2),
+ AMD_PINCTRL_FUNC_GRP(113, 3),
+ AMD_PINCTRL_FUNC_GRP(114, 0),
+ AMD_PINCTRL_FUNC_GRP(114, 1),
+ AMD_PINCTRL_FUNC_GRP(114, 2),
+ AMD_PINCTRL_FUNC_GRP(114, 3),
+ AMD_PINCTRL_FUNC_GRP(115, 0),
+ AMD_PINCTRL_FUNC_GRP(115, 1),
+ AMD_PINCTRL_FUNC_GRP(115, 2),
+ AMD_PINCTRL_FUNC_GRP(115, 3),
+ AMD_PINCTRL_FUNC_GRP(116, 0),
+ AMD_PINCTRL_FUNC_GRP(116, 1),
+ AMD_PINCTRL_FUNC_GRP(116, 2),
+ AMD_PINCTRL_FUNC_GRP(116, 3),
+ AMD_PINCTRL_FUNC_GRP(117, 0),
+ AMD_PINCTRL_FUNC_GRP(117, 1),
+ AMD_PINCTRL_FUNC_GRP(117, 2),
+ AMD_PINCTRL_FUNC_GRP(117, 3),
+ AMD_PINCTRL_FUNC_GRP(118, 0),
+ AMD_PINCTRL_FUNC_GRP(118, 1),
+ AMD_PINCTRL_FUNC_GRP(118, 2),
+ AMD_PINCTRL_FUNC_GRP(118, 3),
+ AMD_PINCTRL_FUNC_GRP(119, 0),
+ AMD_PINCTRL_FUNC_GRP(119, 1),
+ AMD_PINCTRL_FUNC_GRP(119, 2),
+ AMD_PINCTRL_FUNC_GRP(119, 3),
+ AMD_PINCTRL_FUNC_GRP(120, 0),
+ AMD_PINCTRL_FUNC_GRP(120, 1),
+ AMD_PINCTRL_FUNC_GRP(120, 2),
+ AMD_PINCTRL_FUNC_GRP(120, 3),
+ AMD_PINCTRL_FUNC_GRP(121, 0),
+ AMD_PINCTRL_FUNC_GRP(121, 1),
+ AMD_PINCTRL_FUNC_GRP(121, 2),
+ AMD_PINCTRL_FUNC_GRP(121, 3),
+ AMD_PINCTRL_FUNC_GRP(122, 0),
+ AMD_PINCTRL_FUNC_GRP(122, 1),
+ AMD_PINCTRL_FUNC_GRP(122, 2),
+ AMD_PINCTRL_FUNC_GRP(122, 3),
+ AMD_PINCTRL_FUNC_GRP(123, 0),
+ AMD_PINCTRL_FUNC_GRP(123, 1),
+ AMD_PINCTRL_FUNC_GRP(123, 2),
+ AMD_PINCTRL_FUNC_GRP(123, 3),
+ AMD_PINCTRL_FUNC_GRP(124, 0),
+ AMD_PINCTRL_FUNC_GRP(124, 1),
+ AMD_PINCTRL_FUNC_GRP(124, 2),
+ AMD_PINCTRL_FUNC_GRP(124, 3),
+ AMD_PINCTRL_FUNC_GRP(125, 0),
+ AMD_PINCTRL_FUNC_GRP(125, 1),
+ AMD_PINCTRL_FUNC_GRP(125, 2),
+ AMD_PINCTRL_FUNC_GRP(125, 3),
+ AMD_PINCTRL_FUNC_GRP(126, 0),
+ AMD_PINCTRL_FUNC_GRP(126, 1),
+ AMD_PINCTRL_FUNC_GRP(126, 2),
+ AMD_PINCTRL_FUNC_GRP(126, 3),
+ AMD_PINCTRL_FUNC_GRP(127, 0),
+ AMD_PINCTRL_FUNC_GRP(127, 1),
+ AMD_PINCTRL_FUNC_GRP(127, 2),
+ AMD_PINCTRL_FUNC_GRP(127, 3),
+ AMD_PINCTRL_FUNC_GRP(128, 0),
+ AMD_PINCTRL_FUNC_GRP(128, 1),
+ AMD_PINCTRL_FUNC_GRP(128, 2),
+ AMD_PINCTRL_FUNC_GRP(128, 3),
+ AMD_PINCTRL_FUNC_GRP(129, 0),
+ AMD_PINCTRL_FUNC_GRP(129, 1),
+ AMD_PINCTRL_FUNC_GRP(129, 2),
+ AMD_PINCTRL_FUNC_GRP(129, 3),
+ AMD_PINCTRL_FUNC_GRP(130, 0),
+ AMD_PINCTRL_FUNC_GRP(130, 1),
+ AMD_PINCTRL_FUNC_GRP(130, 2),
+ AMD_PINCTRL_FUNC_GRP(130, 3),
+ AMD_PINCTRL_FUNC_GRP(131, 0),
+ AMD_PINCTRL_FUNC_GRP(131, 1),
+ AMD_PINCTRL_FUNC_GRP(131, 2),
+ AMD_PINCTRL_FUNC_GRP(131, 3),
+ AMD_PINCTRL_FUNC_GRP(132, 0),
+ AMD_PINCTRL_FUNC_GRP(132, 1),
+ AMD_PINCTRL_FUNC_GRP(132, 2),
+ AMD_PINCTRL_FUNC_GRP(132, 3),
+ AMD_PINCTRL_FUNC_GRP(133, 0),
+ AMD_PINCTRL_FUNC_GRP(133, 1),
+ AMD_PINCTRL_FUNC_GRP(133, 2),
+ AMD_PINCTRL_FUNC_GRP(133, 3),
+ AMD_PINCTRL_FUNC_GRP(134, 0),
+ AMD_PINCTRL_FUNC_GRP(134, 1),
+ AMD_PINCTRL_FUNC_GRP(134, 2),
+ AMD_PINCTRL_FUNC_GRP(134, 3),
+ AMD_PINCTRL_FUNC_GRP(135, 0),
+ AMD_PINCTRL_FUNC_GRP(135, 1),
+ AMD_PINCTRL_FUNC_GRP(135, 2),
+ AMD_PINCTRL_FUNC_GRP(135, 3),
+ AMD_PINCTRL_FUNC_GRP(136, 0),
+ AMD_PINCTRL_FUNC_GRP(136, 1),
+ AMD_PINCTRL_FUNC_GRP(136, 2),
+ AMD_PINCTRL_FUNC_GRP(136, 3),
+ AMD_PINCTRL_FUNC_GRP(137, 0),
+ AMD_PINCTRL_FUNC_GRP(137, 1),
+ AMD_PINCTRL_FUNC_GRP(137, 2),
+ AMD_PINCTRL_FUNC_GRP(137, 3),
+ AMD_PINCTRL_FUNC_GRP(138, 0),
+ AMD_PINCTRL_FUNC_GRP(138, 1),
+ AMD_PINCTRL_FUNC_GRP(138, 2),
+ AMD_PINCTRL_FUNC_GRP(138, 3),
+ AMD_PINCTRL_FUNC_GRP(139, 0),
+ AMD_PINCTRL_FUNC_GRP(139, 1),
+ AMD_PINCTRL_FUNC_GRP(139, 2),
+ AMD_PINCTRL_FUNC_GRP(139, 3),
+ AMD_PINCTRL_FUNC_GRP(140, 0),
+ AMD_PINCTRL_FUNC_GRP(140, 1),
+ AMD_PINCTRL_FUNC_GRP(140, 2),
+ AMD_PINCTRL_FUNC_GRP(140, 3),
+ AMD_PINCTRL_FUNC_GRP(141, 0),
+ AMD_PINCTRL_FUNC_GRP(141, 1),
+ AMD_PINCTRL_FUNC_GRP(141, 2),
+ AMD_PINCTRL_FUNC_GRP(141, 3),
+ AMD_PINCTRL_FUNC_GRP(142, 0),
+ AMD_PINCTRL_FUNC_GRP(142, 1),
+ AMD_PINCTRL_FUNC_GRP(142, 2),
+ AMD_PINCTRL_FUNC_GRP(142, 3),
+ AMD_PINCTRL_FUNC_GRP(143, 0),
+ AMD_PINCTRL_FUNC_GRP(143, 1),
+ AMD_PINCTRL_FUNC_GRP(143, 2),
+ AMD_PINCTRL_FUNC_GRP(143, 3),
+ AMD_PINCTRL_FUNC_GRP(144, 0),
+ AMD_PINCTRL_FUNC_GRP(144, 1),
+ AMD_PINCTRL_FUNC_GRP(144, 2),
+ AMD_PINCTRL_FUNC_GRP(144, 3),
+
+ PINCTRL_PINGROUP("i2c0", AMD_PINS(145, 146), 2),
+ PINCTRL_PINGROUP("i2c1", AMD_PINS(147, 148), 2),
+ PINCTRL_PINGROUP("i2c2", AMD_PINS(113, 114), 2),
+ PINCTRL_PINGROUP("i2c3", AMD_PINS(19, 20), 2),
+ PINCTRL_PINGROUP("uart0", AMD_PINS(135, 136, 137, 138, 139), 5),
+ PINCTRL_PINGROUP("uart1", AMD_PINS(140, 141, 142, 143, 144), 5),
+};
-static const unsigned uart0_pins[] = {135, 136, 137, 138, 139};
-static const unsigned uart1_pins[] = {140, 141, 142, 143, 144};
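+/*
+ * Each IOMUX function selects one of the four IMX_F0..IMX_F3 pin
+ * groups generated for its GPIO by AMD_PINCTRL_FUNC_GRP() above.
+ */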
+#define AMD_PMUX_FUNC(_number) { \
+ .name = "iomux_gpio_"#_number, \
+ .groups = { \
+ "IMX_F0_GPIO"#_number, "IMX_F1_GPIO"#_number, \
+ "IMX_F2_GPIO"#_number, "IMX_F3_GPIO"#_number, \
+ }, \
+ .index = _number, \
+ .ngroups = NSELECTS, \
+}
-static const struct amd_pingroup kerncz_groups[] = {
- {
- .name = "i2c0",
- .pins = i2c0_pins,
- .npins = 2,
- },
- {
- .name = "i2c1",
- .pins = i2c1_pins,
- .npins = 2,
- },
- {
- .name = "i2c2",
- .pins = i2c2_pins,
- .npins = 2,
- },
- {
- .name = "i2c3",
- .pins = i2c3_pins,
- .npins = 2,
- },
- {
- .name = "uart0",
- .pins = uart0_pins,
- .npins = 5,
- },
- {
- .name = "uart1",
- .pins = uart1_pins,
- .npins = 5,
- },
+static const struct amd_function pmx_functions[] = {
+ AMD_PMUX_FUNC(0),
+ AMD_PMUX_FUNC(1),
+ AMD_PMUX_FUNC(2),
+ AMD_PMUX_FUNC(3),
+ AMD_PMUX_FUNC(4),
+ AMD_PMUX_FUNC(5),
+ AMD_PMUX_FUNC(6),
+ AMD_PMUX_FUNC(7),
+ AMD_PMUX_FUNC(8),
+ AMD_PMUX_FUNC(9),
+ AMD_PMUX_FUNC(10),
+ AMD_PMUX_FUNC(11),
+ AMD_PMUX_FUNC(12),
+ AMD_PMUX_FUNC(13),
+ AMD_PMUX_FUNC(14),
+ AMD_PMUX_FUNC(15),
+ AMD_PMUX_FUNC(16),
+ AMD_PMUX_FUNC(17),
+ AMD_PMUX_FUNC(18),
+ AMD_PMUX_FUNC(19),
+ AMD_PMUX_FUNC(20),
+ AMD_PMUX_FUNC(21),
+ AMD_PMUX_FUNC(22),
+ AMD_PMUX_FUNC(23),
+ AMD_PMUX_FUNC(24),
+ AMD_PMUX_FUNC(25),
+ AMD_PMUX_FUNC(26),
+ AMD_PMUX_FUNC(27),
+ AMD_PMUX_FUNC(28),
+ AMD_PMUX_FUNC(29),
+ AMD_PMUX_FUNC(30),
+ AMD_PMUX_FUNC(31),
+ AMD_PMUX_FUNC(32),
+ AMD_PMUX_FUNC(33),
+ AMD_PMUX_FUNC(34),
+ AMD_PMUX_FUNC(35),
+ AMD_PMUX_FUNC(36),
+ AMD_PMUX_FUNC(37),
+ AMD_PMUX_FUNC(38),
+ AMD_PMUX_FUNC(39),
+ AMD_PMUX_FUNC(40),
+ AMD_PMUX_FUNC(41),
+ AMD_PMUX_FUNC(42),
+ AMD_PMUX_FUNC(43),
+ AMD_PMUX_FUNC(44),
+ AMD_PMUX_FUNC(45),
+ AMD_PMUX_FUNC(46),
+ AMD_PMUX_FUNC(47),
+ AMD_PMUX_FUNC(48),
+ AMD_PMUX_FUNC(49),
+ AMD_PMUX_FUNC(50),
+ AMD_PMUX_FUNC(51),
+ AMD_PMUX_FUNC(52),
+ AMD_PMUX_FUNC(53),
+ AMD_PMUX_FUNC(54),
+ AMD_PMUX_FUNC(55),
+ AMD_PMUX_FUNC(56),
+ AMD_PMUX_FUNC(57),
+ AMD_PMUX_FUNC(58),
+ AMD_PMUX_FUNC(59),
+ AMD_PMUX_FUNC(60),
+ AMD_PMUX_FUNC(61),
+ AMD_PMUX_FUNC(62),
+ AMD_PMUX_FUNC(64),
+ AMD_PMUX_FUNC(65),
+ AMD_PMUX_FUNC(66),
+ AMD_PMUX_FUNC(67),
+ AMD_PMUX_FUNC(68),
+ AMD_PMUX_FUNC(69),
+ AMD_PMUX_FUNC(70),
+ AMD_PMUX_FUNC(71),
+ AMD_PMUX_FUNC(72),
+ AMD_PMUX_FUNC(73),
+ AMD_PMUX_FUNC(74),
+ AMD_PMUX_FUNC(75),
+ AMD_PMUX_FUNC(76),
+ AMD_PMUX_FUNC(77),
+ AMD_PMUX_FUNC(78),
+ AMD_PMUX_FUNC(79),
+ AMD_PMUX_FUNC(80),
+ AMD_PMUX_FUNC(81),
+ AMD_PMUX_FUNC(82),
+ AMD_PMUX_FUNC(83),
+ AMD_PMUX_FUNC(84),
+ AMD_PMUX_FUNC(85),
+ AMD_PMUX_FUNC(86),
+ AMD_PMUX_FUNC(87),
+ AMD_PMUX_FUNC(88),
+ AMD_PMUX_FUNC(89),
+ AMD_PMUX_FUNC(90),
+ AMD_PMUX_FUNC(91),
+ AMD_PMUX_FUNC(92),
+ AMD_PMUX_FUNC(93),
+ AMD_PMUX_FUNC(94),
+ AMD_PMUX_FUNC(95),
+ AMD_PMUX_FUNC(96),
+ AMD_PMUX_FUNC(97),
+ AMD_PMUX_FUNC(98),
+ AMD_PMUX_FUNC(99),
+ AMD_PMUX_FUNC(100),
+ AMD_PMUX_FUNC(101),
+ AMD_PMUX_FUNC(102),
+ AMD_PMUX_FUNC(103),
+ AMD_PMUX_FUNC(104),
+ AMD_PMUX_FUNC(105),
+ AMD_PMUX_FUNC(106),
+ AMD_PMUX_FUNC(107),
+ AMD_PMUX_FUNC(108),
+ AMD_PMUX_FUNC(109),
+ AMD_PMUX_FUNC(110),
+ AMD_PMUX_FUNC(111),
+ AMD_PMUX_FUNC(112),
+ AMD_PMUX_FUNC(113),
+ AMD_PMUX_FUNC(114),
+ AMD_PMUX_FUNC(115),
+ AMD_PMUX_FUNC(116),
+ AMD_PMUX_FUNC(117),
+ AMD_PMUX_FUNC(118),
+ AMD_PMUX_FUNC(119),
+ AMD_PMUX_FUNC(120),
+ AMD_PMUX_FUNC(121),
+ AMD_PMUX_FUNC(122),
+ AMD_PMUX_FUNC(123),
+ AMD_PMUX_FUNC(124),
+ AMD_PMUX_FUNC(125),
+ AMD_PMUX_FUNC(126),
+ AMD_PMUX_FUNC(127),
+ AMD_PMUX_FUNC(128),
+ AMD_PMUX_FUNC(129),
+ AMD_PMUX_FUNC(130),
+ AMD_PMUX_FUNC(131),
+ AMD_PMUX_FUNC(132),
+ AMD_PMUX_FUNC(133),
+ AMD_PMUX_FUNC(134),
+ AMD_PMUX_FUNC(135),
+ AMD_PMUX_FUNC(136),
+ AMD_PMUX_FUNC(137),
+ AMD_PMUX_FUNC(138),
+ AMD_PMUX_FUNC(139),
+ AMD_PMUX_FUNC(140),
+ AMD_PMUX_FUNC(141),
+ AMD_PMUX_FUNC(142),
+ AMD_PMUX_FUNC(143),
+ AMD_PMUX_FUNC(144),
};
#endif
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 517f2a6330ad..82b921fd630d 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -237,8 +237,6 @@ static void atmel_gpio_irq_unmask(struct irq_data *d)
BIT(pin->line));
}
-#ifdef CONFIG_PM_SLEEP
-
static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
@@ -255,9 +253,6 @@ static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-#else
-#define atmel_gpio_irq_set_wake NULL
-#endif /* CONFIG_PM_SLEEP */
static struct irq_chip atmel_gpio_irq_chip = {
.name = "GPIO",
@@ -265,7 +260,7 @@ static struct irq_chip atmel_gpio_irq_chip = {
.irq_mask = atmel_gpio_irq_mask,
.irq_unmask = atmel_gpio_irq_unmask,
.irq_set_type = atmel_gpio_irq_set_type,
- .irq_set_wake = atmel_gpio_irq_set_wake,
+ .irq_set_wake = pm_sleep_ptr(atmel_gpio_irq_set_wake),
};
static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d91a010e65f5..5634fa063ebf 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1615,8 +1615,6 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-#ifdef CONFIG_PM
-
static u32 wakeups[MAX_GPIO_BANKS];
static u32 backups[MAX_GPIO_BANKS];
@@ -1683,10 +1681,6 @@ void at91_pinctrl_gpio_resume(void)
}
}
-#else
-#define gpio_irq_set_wake NULL
-#endif /* CONFIG_PM */
-
static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1741,14 +1735,14 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
gpio_irqchip->irq_disable = gpio_irq_mask;
gpio_irqchip->irq_mask = gpio_irq_mask;
gpio_irqchip->irq_unmask = gpio_irq_unmask;
- gpio_irqchip->irq_set_wake = gpio_irq_set_wake;
+ gpio_irqchip->irq_set_wake = pm_ptr(gpio_irq_set_wake);
gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
/*
- * Let the generic code handle this edge IRQ, the the chained
+ * Let the generic code handle this edge IRQ, the chained
* handler will perform the actual work of handling the parent
* interrupt.
*/
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 207cbae3a7bf..7ab20ac15391 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -73,7 +73,7 @@ static const struct pinctrl_pin_desc axp209_pins[] = {
PINCTRL_PIN(2, "GPIO2"),
};
-static const struct pinctrl_pin_desc axp813_pins[] = {
+static const struct pinctrl_pin_desc axp22x_pins[] = {
PINCTRL_PIN(0, "GPIO0"),
PINCTRL_PIN(1, "GPIO1"),
};
@@ -87,9 +87,16 @@ static const struct axp20x_pctrl_desc axp20x_data = {
.adc_mux = AXP20X_MUX_ADC,
};
+static const struct axp20x_pctrl_desc axp22x_data = {
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 0,
+};
+
static const struct axp20x_pctrl_desc axp813_data = {
- .pins = axp813_pins,
- .npins = ARRAY_SIZE(axp813_pins),
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
.ldo_mask = BIT(0) | BIT(1),
.adc_mask = BIT(0),
.gpio_status_offset = 0,
@@ -388,6 +395,7 @@ static int axp20x_build_funcs_groups(struct platform_device *pdev)
static const struct of_device_id axp20x_pctl_match[] = {
{ .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp221-gpio", .data = &axp22x_data, },
{ .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 1ca11616db74..3a9ee9c8af11 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include "core.h"
@@ -135,7 +136,6 @@ struct ingenic_pinctrl {
struct ingenic_gpio_chip {
struct ingenic_pinctrl *jzpc;
struct gpio_chip gc;
- struct irq_chip irq_chip;
unsigned int irq, reg_base;
};
@@ -3393,7 +3393,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
@@ -3405,7 +3405,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
@@ -3417,7 +3417,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ gpiochip_enable_irq(gc, irq);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
@@ -3433,7 +3435,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
ingenic_gpio_irq_mask(irqd);
@@ -3443,13 +3445,15 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
+
+ gpiochip_disable_irq(gc, irq);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
bool high;
if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
@@ -3477,6 +3481,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
@@ -3498,12 +3503,12 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
* best we can do is to set up a single-edge interrupt and then
* switch to the opposing edge when ACKing the interrupt.
*/
- bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
+ bool high = ingenic_gpio_get_value(jzgc, irq);
type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
}
- irq_set_type(jzgc, irqd->hwirq, type);
+ irq_set_type(jzgc, irq, type);
return 0;
}
@@ -3668,22 +3673,45 @@ static const struct pinctrl_ops ingenic_pctlops = {
static int ingenic_gpio_irq_request(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
int ret;
- ret = ingenic_gpio_direction_input(gpio_chip, data->hwirq);
+ ret = ingenic_gpio_direction_input(gpio_chip, irq);
if (ret)
return ret;
- return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+ return gpiochip_reqres_irq(gpio_chip, irq);
}
static void ingenic_gpio_irq_release(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
+
+ return gpiochip_relres_irq(gpio_chip, irq);
+}
+
+static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- return gpiochip_relres_irq(gpio_chip, data->hwirq);
+ seq_printf(p, "%s", gpio_chip->label);
}
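+/*
+ * This irq_chip is immutable (IRQCHIP_IMMUTABLE), so gpiolib will not
+ * patch it at runtime; the enable/disable callbacks above account for
+ * this by calling gpiochip_enable_irq()/gpiochip_disable_irq()
+ * themselves.
+ */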
+static const struct irq_chip ingenic_gpio_irqchip = {
+ .irq_enable = ingenic_gpio_irq_enable,
+ .irq_disable = ingenic_gpio_irq_disable,
+ .irq_unmask = ingenic_gpio_irq_unmask,
+ .irq_mask = ingenic_gpio_irq_mask,
+ .irq_ack = ingenic_gpio_irq_ack,
+ .irq_set_type = ingenic_gpio_irq_set_type,
+ .irq_set_wake = ingenic_gpio_irq_set_wake,
+ .irq_request_resources = ingenic_gpio_irq_request,
+ .irq_release_resources = ingenic_gpio_irq_release,
+ .irq_print_chip = ingenic_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+};
+
static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
int pin, int func)
{
@@ -4172,20 +4200,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
if (!jzgc->irq)
return -EINVAL;
- jzgc->irq_chip.name = jzgc->gc.label;
- jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
- jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
- jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
- jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
- jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
- jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
- jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
- jzgc->irq_chip.irq_request_resources = ingenic_gpio_irq_request;
- jzgc->irq_chip.irq_release_resources = ingenic_gpio_irq_release;
- jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
-
girq = &jzgc->gc.irq;
- girq->chip = &jzgc->irq_chip;
+ gpio_irq_chip_set_chip(girq, &ingenic_gpio_irqchip);
girq->parent_handler = ingenic_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 771dd1f4fbe0..c7df8c5fe585 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -331,6 +331,7 @@ struct ocelot_pinctrl {
const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
+ struct workqueue_struct *wq;
};
struct ocelot_match_data {
@@ -338,6 +339,11 @@ struct ocelot_match_data {
struct ocelot_pincfg_data pincfg_data;
};
+struct ocelot_irq_work {
+ struct work_struct irq_work;
+ struct irq_desc *irq_desc;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1813,6 +1819,75 @@ static void ocelot_irq_mask(struct irq_data *data)
gpiochip_disable_irq(chip, gpio);
}
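+/*
+ * Replay a level interrupt that was missed while masked. The replay is
+ * deferred to an ordered workqueue, where the chained-handler context
+ * is recreated around generic_handle_domain_irq() with local
+ * interrupts disabled.
+ */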
+static void ocelot_irq_work(struct work_struct *work)
+{
+ struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
+ struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc);
+ struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc);
+ struct irq_data *data = irq_desc_get_irq_data(w->irq_desc);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ local_irq_disable();
+ chained_irq_enter(parent_chip, w->irq_desc);
+ generic_handle_domain_irq(chip->irq.domain, gpio);
+ chained_irq_exit(parent_chip, w->irq_desc);
+ local_irq_enable();
+
+ kfree(w);
+}
+
+static void ocelot_irq_unmask_level(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ struct irq_desc *desc = irq_data_to_desc(data);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bit = BIT(gpio % 32);
+ bool ack = false, active = false;
+ u8 trigger_level;
+ int val;
+
+ trigger_level = irqd_get_trigger_type(data);
+
+ /* Check if the interrupt line is still active. */
+ regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+ if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+ (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+ active = true;
+
+ /*
+ * Check if the interrupt controller has seen any changes in the
+ * interrupt line.
+ */
+ regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
+ if (val & bit)
+ ack = true;
+
+ /* Enable the interrupt now */
+ gpiochip_enable_irq(chip, gpio);
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
+ bit, bit);
+
+ /*
+ * In case the interrupt line is still active and the interrupt
+ * controller has not seen any changes on the line, another interrupt
+ * must have occurred while the line was active. That one was missed,
+ * so kick the interrupt handler again.
+ */
+ if (active && !ack) {
+ struct ocelot_irq_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->irq_desc = desc;
+ INIT_WORK(&work->irq_work, ocelot_irq_work);
+ queue_work(info->wq, &work->irq_work);
+ }
+}
+
static void ocelot_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
@@ -1836,13 +1911,12 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_eoi_irqchip = {
+static struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
- .irq_eoi = ocelot_irq_ack,
- .irq_unmask = ocelot_irq_unmask,
- .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
- IRQCHIP_IMMUTABLE,
+ .irq_ack = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask_level,
+ .flags = IRQCHIP_IMMUTABLE,
.irq_set_type = ocelot_irq_set_type,
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
@@ -1859,14 +1933,9 @@ static struct irq_chip ocelot_irqchip = {
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
{
- type &= IRQ_TYPE_SENSE_MASK;
-
- if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
- if (type & IRQ_TYPE_LEVEL_HIGH)
- irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
- handle_fasteoi_irq, NULL);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip,
+ handle_level_irq, NULL);
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
handle_edge_irq, NULL);
@@ -1944,6 +2013,7 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{ .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc },
{},
};
+MODULE_DEVICE_TABLE(of, ocelot_pinctrl_of_match);
static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
const struct ocelot_pinctrl *info)
@@ -1995,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info->desc)
return -ENOMEM;
+ info->wq = alloc_ordered_workqueue("ocelot_ordered", 0);
+ if (!info->wq)
+ return -ENOMEM;
+
info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
@@ -2017,7 +2091,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(info->map);
}
- dev_set_drvdata(dev, info->map);
+ dev_set_drvdata(dev, info);
info->dev = dev;
/* Pinconf registers */
@@ -2042,6 +2116,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static int ocelot_pinctrl_remove(struct platform_device *pdev)
+{
+ struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+
+ destroy_workqueue(info->wq);
+
+ return 0;
+}
+
static struct platform_driver ocelot_pinctrl_driver = {
.driver = {
.name = "pinctrl-ocelot",
@@ -2049,5 +2132,7 @@ static struct platform_driver ocelot_pinctrl_driver = {
.suppress_bind_attrs = true,
},
.probe = ocelot_pinctrl_probe,
+ .remove = ocelot_pinctrl_remove,
};
-builtin_platform_driver(ocelot_pinctrl_driver);
+module_platform_driver(ocelot_pinctrl_driver);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 2a86c1035cc8..3eb40e230d98 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -207,6 +207,7 @@ struct starfive_pinctrl {
void __iomem *base;
void __iomem *padctl;
struct pinctrl_dev *pctl;
+ struct mutex mutex; /* serialize adding groups and functions */
};
static inline unsigned int starfive_pin_to_gpio(const struct starfive_pinctrl *sfp,
@@ -522,6 +523,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
+ mutex_lock(&sfp->mutex);
for_each_child_of_node(np, child) {
int npins;
int i;
@@ -615,12 +617,14 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
*maps = map;
*num_maps = nmaps;
+ mutex_unlock(&sfp->mutex);
return 0;
put_child:
of_node_put(child);
free_map:
pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&sfp->mutex);
return ret;
}
@@ -1267,6 +1271,7 @@ static int starfive_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
sfp->gc.parent = dev;
raw_spin_lock_init(&sfp->lock);
+ mutex_init(&sfp->mutex);
ret = devm_pinctrl_register_and_init(dev, &starfive_desc, sfp, &sfp->pctl);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index e14012209992..7d2fbf8a02cd 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -163,6 +163,8 @@ static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
* @num_groups: Number of function groups.
*
* Get function's group count and group names.
+ *
+ * Return: 0
*/
static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
@@ -410,6 +412,10 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_ENABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
case PIN_CONFIG_MODE_LOW_POWER:
/*
* These cases are mentioned in dts but configurable
@@ -418,6 +424,11 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
*/
ret = 0;
break;
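+ /*
+ * Output enable is treated as the inverse of tri-state: ask the
+ * PM firmware to disable the tri-state buffer so the pin is
+ * driven.
+ */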
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_DISABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
default:
dev_warn(pctldev->dev,
"unsupported configuration parameter '%u'\n",
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3daeb9772391..f415c13caae0 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -113,6 +113,14 @@ config PINCTRL_MSM8X74
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
+config PINCTRL_MSM8909
+ tristate "Qualcomm 8909 pin controller driver"
+ depends on OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8909 platform.
+
config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver"
depends on OF
@@ -320,6 +328,15 @@ config PINCTRL_SM6350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM6350 platform.
+config PINCTRL_SM6375
+ tristate "Qualcomm Technologies Inc SM6375 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM6375 platform.
+
config PINCTRL_SDX65
tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
depends on GPIOLIB && OF
@@ -367,7 +384,7 @@ config PINCTRL_SM8350
config PINCTRL_SM8450
tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 4f0ee7597f81..fbd64853a24d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PINCTRL_MSM8226) += pinctrl-msm8226.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
+obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
+obj-$(CONFIG_PINCTRL_SM6375) += pinctrl-sm6375.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 74810ec4df44..e97ce45b6d53 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -401,7 +401,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
"Slew resource not provided\n");
- if (data->is_clk_optional)
+ if (of_property_read_bool(dev->of_node, "qcom,adsp-bypass-mode"))
ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
else
ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index 759d5d8da562..afbac2a6c82c 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -77,7 +77,6 @@ struct lpi_pinctrl_variant_data {
int ngroups;
const struct lpi_function *functions;
int nfunctions;
- bool is_clk_optional;
};
int lpi_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
new file mode 100644
index 000000000000..6dd15b910632
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2022, Kernkonzept GmbH.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
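+/* FUNCTION() maps a msm_mux_<name> selector to its <name>_groups table. */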
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
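+/*
+ * Each TLMM GPIO owns a 0x1000-byte register window; the ctl, io and
+ * intr offsets in PINGROUP() are relative to that window.
+ */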
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
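+/*
+ * The SDC/QDSD pads are configuration-only: they expose pull and drive
+ * strength, and all mux, direction and interrupt fields are stubbed
+ * out with -1.
+ */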
+static const struct pinctrl_pin_desc msm8909_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "SDC1_CLK"),
+ PINCTRL_PIN(114, "SDC1_CMD"),
+ PINCTRL_PIN(115, "SDC1_DATA"),
+ PINCTRL_PIN(116, "SDC2_CLK"),
+ PINCTRL_PIN(117, "SDC2_CMD"),
+ PINCTRL_PIN(118, "SDC2_DATA"),
+ PINCTRL_PIN(119, "QDSD_CLK"),
+ PINCTRL_PIN(120, "QDSD_CMD"),
+ PINCTRL_PIN(121, "QDSD_DATA0"),
+ PINCTRL_PIN(122, "QDSD_DATA1"),
+ PINCTRL_PIN(123, "QDSD_DATA2"),
+ PINCTRL_PIN(124, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+
+static const unsigned int sdc1_clk_pins[] = { 113 };
+static const unsigned int sdc1_cmd_pins[] = { 114 };
+static const unsigned int sdc1_data_pins[] = { 115 };
+static const unsigned int sdc2_clk_pins[] = { 116 };
+static const unsigned int sdc2_cmd_pins[] = { 117 };
+static const unsigned int sdc2_data_pins[] = { 118 };
+static const unsigned int qdsd_clk_pins[] = { 119 };
+static const unsigned int qdsd_cmd_pins[] = { 120 };
+static const unsigned int qdsd_data0_pins[] = { 121 };
+static const unsigned int qdsd_data1_pins[] = { 122 };
+static const unsigned int qdsd_data2_pins[] = { 123 };
+static const unsigned int qdsd_data3_pins[] = { 124 };
+
+enum msm8909_functions {
+ msm_mux_gpio,
+ msm_mux_adsp_ext,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac,
+ msm_mux_atest_gpsadc0,
+ msm_mux_atest_gpsadc1,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi1_cs1,
+ msm_mux_blsp_spi1_cs2,
+ msm_mux_blsp_spi1_cs3,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi2_cs1,
+ msm_mux_blsp_spi2_cs2,
+ msm_mux_blsp_spi2_cs3,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi3_cs1,
+ msm_mux_blsp_spi3_cs2,
+ msm_mux_blsp_spi3_cs3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uim1,
+ msm_mux_blsp_uim2,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cdc_pdm0,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_ext_lpass,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gsm0_tx,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mdp_vsync,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_tsync,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s_data0_a,
+ msm_mux_pri_mi2s_data0_b,
+ msm_mux_pri_mi2s_data1_a,
+ msm_mux_pri_mi2s_data1_b,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_sck_a,
+ msm_mux_pri_mi2s_sck_b,
+ msm_mux_pri_mi2s_ws_a,
+ msm_mux_pri_mi2s_ws_b,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sec_mi2s,
+ msm_mux_smb_int,
+ msm_mux_ssbi0,
+ msm_mux_ssbi1,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim3_clk,
+ msm_mux_uim3_data,
+ msm_mux_uim3_present,
+ msm_mux_uim3_reset,
+ msm_mux_uim_batt,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux__,
+};
+
+static const char * const adsp_ext_groups[] = { "gpio38" };
+static const char * const atest_bbrx0_groups[] = { "gpio37" };
+static const char * const atest_bbrx1_groups[] = { "gpio36" };
+static const char * const atest_char0_groups[] = { "gpio62" };
+static const char * const atest_char1_groups[] = { "gpio61" };
+static const char * const atest_char2_groups[] = { "gpio60" };
+static const char * const atest_char3_groups[] = { "gpio59" };
+static const char * const atest_char_groups[] = { "gpio63" };
+static const char * const atest_combodac_groups[] = {
+ "gpio32", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio47", "gpio48", "gpio66", "gpio81", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio94", "gpio95", "gpio110"
+};
+static const char * const atest_gpsadc0_groups[] = { "gpio65" };
+static const char * const atest_gpsadc1_groups[] = { "gpio79" };
+static const char * const atest_wlan0_groups[] = { "gpio96" };
+static const char * const atest_wlan1_groups[] = { "gpio97" };
+static const char * const bimc_dte0_groups[] = { "gpio6", "gpio59" };
+static const char * const bimc_dte1_groups[] = { "gpio7", "gpio60" };
+static const char * const blsp_i2c1_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_i2c2_groups[] = { "gpio111", "gpio112" };
+static const char * const blsp_i2c3_groups[] = { "gpio29", "gpio30" };
+static const char * const blsp_i2c4_groups[] = { "gpio14", "gpio15" };
+static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" };
+static const char * const blsp_i2c6_groups[] = { "gpio10", "gpio11" };
+static const char * const blsp_spi1_cs1_groups[] = { "gpio97" };
+static const char * const blsp_spi1_cs2_groups[] = { "gpio37" };
+static const char * const blsp_spi1_cs3_groups[] = { "gpio65" };
+static const char * const blsp_spi1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_spi2_cs1_groups[] = { "gpio98" };
+static const char * const blsp_spi2_cs2_groups[] = { "gpio17" };
+static const char * const blsp_spi2_cs3_groups[] = { "gpio5" };
+static const char * const blsp_spi2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_spi3_cs1_groups[] = { "gpio95" };
+static const char * const blsp_spi3_cs2_groups[] = { "gpio65" };
+static const char * const blsp_spi3_cs3_groups[] = { "gpio4" };
+static const char * const blsp_spi3_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_uim1_groups[] = { "gpio4", "gpio5" };
+static const char * const blsp_uim2_groups[] = { "gpio20", "gpio21" };
+static const char * const cam_mclk_groups[] = { "gpio26", "gpio27" };
+static const char * const cci_async_groups[] = { "gpio33" };
+static const char * const cci_timer0_groups[] = { "gpio31" };
+static const char * const cci_timer1_groups[] = { "gpio32" };
+static const char * const cci_timer2_groups[] = { "gpio38" };
+static const char * const cdc_pdm0_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64"
+};
+static const char * const dbg_out_groups[] = { "gpio10" };
+static const char * const dmic0_clk_groups[] = { "gpio4" };
+static const char * const dmic0_data_groups[] = { "gpio5" };
+static const char * const ebi0_wrcdc_groups[] = { "gpio64" };
+static const char * const ebi2_a_groups[] = { "gpio99" };
+static const char * const ebi2_lcd_groups[] = {
+ "gpio24", "gpio24", "gpio25", "gpio95"
+};
+static const char * const ext_lpass_groups[] = { "gpio45" };
+static const char * const gcc_gp1_clk_a_groups[] = { "gpio49" };
+static const char * const gcc_gp1_clk_b_groups[] = { "gpio14" };
+static const char * const gcc_gp2_clk_a_groups[] = { "gpio50" };
+static const char * const gcc_gp2_clk_b_groups[] = { "gpio12" };
+static const char * const gcc_gp3_clk_a_groups[] = { "gpio51" };
+static const char * const gcc_gp3_clk_b_groups[] = { "gpio13" };
+static const char * const gcc_plltest_groups[] = { "gpio66", "gpio67" };
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112"
+};
+static const char * const gsm0_tx_groups[] = { "gpio85" };
+static const char * const ldo_en_groups[] = { "gpio99" };
+static const char * const ldo_update_groups[] = { "gpio98" };
+static const char * const m_voc_groups[] = { "gpio8", "gpio95" };
+static const char * const mdp_vsync_groups[] = { "gpio24", "gpio25" };
+static const char * const modem_tsync_groups[] = { "gpio83" };
+static const char * const nav_pps_groups[] = { "gpio83" };
+static const char * const nav_tsync_groups[] = { "gpio83" };
+static const char * const pa_indicator_groups[] = { "gpio82" };
+static const char * const pbs0_groups[] = { "gpio90" };
+static const char * const pbs1_groups[] = { "gpio91" };
+static const char * const pbs2_groups[] = { "gpio92" };
+static const char * const pri_mi2s_data0_a_groups[] = { "gpio62" };
+static const char * const pri_mi2s_data0_b_groups[] = { "gpio95" };
+static const char * const pri_mi2s_data1_a_groups[] = { "gpio63" };
+static const char * const pri_mi2s_data1_b_groups[] = { "gpio96" };
+static const char * const pri_mi2s_mclk_a_groups[] = { "gpio59" };
+static const char * const pri_mi2s_mclk_b_groups[] = { "gpio98" };
+static const char * const pri_mi2s_sck_a_groups[] = { "gpio60" };
+static const char * const pri_mi2s_sck_b_groups[] = { "gpio94" };
+static const char * const pri_mi2s_ws_a_groups[] = { "gpio61" };
+static const char * const pri_mi2s_ws_b_groups[] = { "gpio110" };
+static const char * const prng_rosc_groups[] = { "gpio43" };
+static const char * const pwr_crypto_enabled_a_groups[] = { "gpio35" };
+static const char * const pwr_crypto_enabled_b_groups[] = { "gpio96" };
+static const char * const pwr_modem_enabled_a_groups[] = { "gpio28" };
+static const char * const pwr_modem_enabled_b_groups[] = { "gpio94" };
+static const char * const pwr_nav_enabled_a_groups[] = { "gpio34" };
+static const char * const pwr_nav_enabled_b_groups[] = { "gpio95" };
+static const char * const qdss_cti_trig_in_a0_groups[] = { "gpio20" };
+static const char * const qdss_cti_trig_in_a1_groups[] = { "gpio49" };
+static const char * const qdss_cti_trig_in_b0_groups[] = { "gpio21" };
+static const char * const qdss_cti_trig_in_b1_groups[] = { "gpio50" };
+static const char * const qdss_cti_trig_out_a0_groups[] = { "gpio23" };
+static const char * const qdss_cti_trig_out_a1_groups[] = { "gpio52" };
+static const char * const qdss_cti_trig_out_b0_groups[] = { "gpio22" };
+static const char * const qdss_cti_trig_out_b1_groups[] = { "gpio51" };
+static const char * const qdss_traceclk_a_groups[] = { "gpio46" };
+static const char * const qdss_tracectl_a_groups[] = { "gpio45" };
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio47", "gpio48", "gpio58", "gpio65", "gpio94", "gpio96",
+ "gpio97"
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio14", "gpio16", "gpio17", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio93"
+};
+static const char * const sd_write_groups[] = { "gpio99" };
+static const char * const sec_mi2s_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio98"
+};
+static const char * const smb_int_groups[] = { "gpio58" };
+static const char * const ssbi0_groups[] = { "gpio88" };
+static const char * const ssbi1_groups[] = { "gpio89" };
+static const char * const uim1_clk_groups[] = { "gpio54" };
+static const char * const uim1_data_groups[] = { "gpio53" };
+static const char * const uim1_present_groups[] = { "gpio56" };
+static const char * const uim1_reset_groups[] = { "gpio55" };
+static const char * const uim2_clk_groups[] = { "gpio50" };
+static const char * const uim2_data_groups[] = { "gpio49" };
+static const char * const uim2_present_groups[] = { "gpio52" };
+static const char * const uim2_reset_groups[] = { "gpio51" };
+static const char * const uim3_clk_groups[] = { "gpio23" };
+static const char * const uim3_data_groups[] = { "gpio20" };
+static const char * const uim3_present_groups[] = { "gpio21" };
+static const char * const uim3_reset_groups[] = { "gpio22" };
+static const char * const uim_batt_groups[] = { "gpio57" };
+static const char * const wcss_bt_groups[] = { "gpio39", "gpio47", "gpio48" };
+static const char * const wcss_fm_groups[] = { "gpio45", "gpio46" };
+static const char * const wcss_wlan_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
+};
+
+static const struct msm_function msm8909_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_combodac),
+ FUNCTION(atest_gpsadc0),
+ FUNCTION(atest_gpsadc1),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi2_cs1),
+ FUNCTION(blsp_spi2_cs2),
+ FUNCTION(blsp_spi2_cs3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi3_cs3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(dbg_out),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ext_lpass),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mdp_vsync),
+ FUNCTION(modem_tsync),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_tsync),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pri_mi2s_data0_a),
+ FUNCTION(pri_mi2s_data0_b),
+ FUNCTION(pri_mi2s_data1_a),
+ FUNCTION(pri_mi2s_data1_b),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_sck_a),
+ FUNCTION(pri_mi2s_sck_b),
+ FUNCTION(pri_mi2s_ws_a),
+ FUNCTION(pri_mi2s_ws_b),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(smb_int),
+ FUNCTION(ssbi0),
+ FUNCTION(ssbi1),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim3_clk),
+ FUNCTION(uim3_data),
+ FUNCTION(uim3_present),
+ FUNCTION(uim3_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(wcss_bt),
+ FUNCTION(wcss_fm),
+ FUNCTION(wcss_wlan),
+};
+
+static const struct msm_pingroup msm8909_groups[] = {
+ PINGROUP(0, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(1, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(2, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(3, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi3_cs3, dmic0_clk, _, _, _, _),
+ PINGROUP(5, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi2_cs3, dmic0_data, _, _, _, _),
+ PINGROUP(6, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte0),
+ PINGROUP(7, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte1),
+ PINGROUP(8, blsp_spi6, m_voc, _, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(9, blsp_spi6, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(10, blsp_spi6, blsp_i2c6, dbg_out, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(11, blsp_spi6, blsp_i2c6, _, _, _, _, _, _, _),
+ PINGROUP(12, blsp_spi4, gcc_gp2_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(13, blsp_spi4, gcc_gp3_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_i2c4, gcc_gp1_clk_b, _, _, _, _, _, qdss_tracedata_b),
+ PINGROUP(15, blsp_spi4, blsp_i2c4, _, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(17, blsp_spi5, blsp_spi2_cs2, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(18, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(20, uim3_data, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_a0, _, _, _),
+ PINGROUP(21, uim3_present, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_b0, _, _, _),
+ PINGROUP(22, uim3_reset, _, qdss_cti_trig_out_b0, _, _, _, _, _, _),
+ PINGROUP(23, uim3_clk, qdss_cti_trig_out_a0, _, _, _, _, _, _, _),
+ PINGROUP(24, mdp_vsync, ebi2_lcd, ebi2_lcd, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, ebi2_lcd, _, _, _, _, _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(28, _, pwr_modem_enabled_a, _, _, _, _, _, _, _),
+ PINGROUP(29, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(30, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(31, cci_timer0, _, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(32, cci_timer1, _, qdss_tracedata_b, _, atest_combodac, _, _, _, _),
+ PINGROUP(33, cci_async, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(34, pwr_nav_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(35, pwr_crypto_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(36, qdss_tracedata_b, _, atest_bbrx1, _, _, _, _, _, _),
+ PINGROUP(37, blsp_spi1_cs2, qdss_tracedata_b, _, atest_bbrx0, _, _, _, _, _),
+ PINGROUP(38, cci_timer2, adsp_ext, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(39, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(40, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(41, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(42, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(43, wcss_wlan, prng_rosc, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(44, wcss_wlan, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(45, wcss_fm, ext_lpass, qdss_tracectl_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(46, wcss_fm, qdss_traceclk_a, _, _, _, _, _, _, _),
+ PINGROUP(47, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(48, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(49, uim2_data, gcc_gp1_clk_a, qdss_cti_trig_in_a1, _, _, _, _, _, _),
+ PINGROUP(50, uim2_clk, gcc_gp2_clk_a, qdss_cti_trig_in_b1, _, _, _, _, _, _),
+ PINGROUP(51, uim2_reset, gcc_gp3_clk_a, qdss_cti_trig_out_b1, _, _, _, _, _, _),
+ PINGROUP(52, uim2_present, qdss_cti_trig_out_a1, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(58, qdss_tracedata_a, smb_int, _, _, _, _, _, _, _),
+ PINGROUP(59, cdc_pdm0, pri_mi2s_mclk_a, atest_char3, _, _, _, _, _, bimc_dte0),
+ PINGROUP(60, cdc_pdm0, pri_mi2s_sck_a, atest_char2, _, _, _, _, _, bimc_dte1),
+ PINGROUP(61, cdc_pdm0, pri_mi2s_ws_a, atest_char1, _, _, _, _, _, _),
+ PINGROUP(62, cdc_pdm0, pri_mi2s_data0_a, atest_char0, _, _, _, _, _, _),
+ PINGROUP(63, cdc_pdm0, pri_mi2s_data1_a, atest_char, _, _, _, _, _, _),
+ PINGROUP(64, cdc_pdm0, _, _, _, _, _, ebi0_wrcdc, _, _),
+ PINGROUP(65, blsp_spi3_cs2, blsp_spi1_cs3, qdss_tracedata_a, _, atest_gpsadc0, _, _, _, _),
+ PINGROUP(66, _, gcc_plltest, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(67, _, gcc_plltest, _, _, _, _, _, _, _),
+ PINGROUP(68, _, _, _, _, _, _, _, _, _),
+ PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ PINGROUP(75, _, _, _, _, _, _, _, _, _),
+ PINGROUP(76, _, _, _, _, _, _, _, _, _),
+ PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ PINGROUP(78, _, _, _, _, _, _, _, _, _),
+ PINGROUP(79, _, _, atest_gpsadc1, _, _, _, _, _, _),
+ PINGROUP(80, _, _, _, _, _, _, _, _, _),
+ PINGROUP(81, _, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(82, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(83, _, modem_tsync, nav_tsync, nav_pps, _, atest_combodac, _, _, _),
+ PINGROUP(84, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(85, gsm0_tx, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(86, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ PINGROUP(88, _, ssbi0, _, _, _, _, _, _, _),
+ PINGROUP(89, _, ssbi1, _, _, _, _, _, _, _),
+ PINGROUP(90, pbs0, _, _, _, _, _, _, _, _),
+ PINGROUP(91, pbs1, _, _, _, _, _, _, _, _),
+ PINGROUP(92, pbs2, _, _, _, _, _, _, _, _),
+ PINGROUP(93, qdss_tracedata_b, _, _, _, _, _, _, _, _),
+ PINGROUP(94, pri_mi2s_sck_b, pwr_modem_enabled_b, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(95, blsp_spi3_cs1, pri_mi2s_data0_b, ebi2_lcd, m_voc, pwr_nav_enabled_b, _, atest_combodac, _, _),
+ PINGROUP(96, pri_mi2s_data1_b, _, pwr_crypto_enabled_b, qdss_tracedata_a, _, atest_wlan0, _, _, _),
+ PINGROUP(97, blsp_spi1_cs1, qdss_tracedata_a, _, atest_wlan1, _, _, _, _, _),
+ PINGROUP(98, sec_mi2s, pri_mi2s_mclk_b, blsp_spi2_cs1, ldo_update, _, _, _, _, _),
+ PINGROUP(99, ebi2_a, sd_write, ldo_en, _, _, _, _, _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, pri_mi2s_ws_b, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(111, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ PINGROUP(112, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
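+/*
+ * Each { gpio, wakeirq } pair below routes a TLMM GPIO to its MPM wake-up
+ * interrupt pin (see struct msm_gpio_wakeirq_map in pinctrl-msm.h), so the
+ * pad can still wake the system from low-power states.
+ */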
+static const struct msm_gpio_wakeirq_map msm8909_mpm_map[] = {
+ { 65, 3 }, { 5, 4 }, { 11, 5 }, { 12, 6 }, { 64, 7 }, { 58, 8 },
+ { 50, 9 }, { 13, 10 }, { 49, 11 }, { 20, 12 }, { 21, 13 }, { 25, 14 },
+ { 46, 15 }, { 45, 16 }, { 28, 17 }, { 44, 18 }, { 31, 19 }, { 43, 20 },
+ { 42, 21 }, { 34, 22 }, { 35, 23 }, { 36, 24 }, { 37, 25 }, { 38, 26 },
+ { 39, 27 }, { 40, 28 }, { 41, 29 }, { 90, 30 }, { 91, 32 }, { 92, 33 },
+ { 94, 34 }, { 95, 35 }, { 96, 36 }, { 97, 37 }, { 98, 38 },
+ { 110, 39 }, { 111, 40 }, { 112, 41 }, { 105, 42 }, { 107, 43 },
+ { 47, 50 }, { 48, 51 },
+};
+
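+/* Top-level SoC description consumed by the shared pinctrl-msm core. */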
+static const struct msm_pinctrl_soc_data msm8909_pinctrl = {
+ .pins = msm8909_pins,
+ .npins = ARRAY_SIZE(msm8909_pins),
+ .functions = msm8909_functions,
+ .nfunctions = ARRAY_SIZE(msm8909_functions),
+ .groups = msm8909_groups,
+ .ngroups = ARRAY_SIZE(msm8909_groups),
+ .ngpios = 113,
+ .wakeirq_map = msm8909_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8909_mpm_map),
+};
+
+static int msm8909_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8909_pinctrl);
+}
+
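+/*
+ * Minimal devicetree node sketch for this driver; the reg and interrupt
+ * values are illustrative assumptions, not taken from this patch:
+ *
+ *	tlmm: pinctrl@1000000 {
+ *		compatible = "qcom,msm8909-tlmm";
+ *		reg = <0x01000000 0x300000>;
+ *		interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ *		gpio-controller;
+ *		#gpio-cells = <2>;
+ *		gpio-ranges = <&tlmm 0 0 113>;
+ *		interrupt-controller;
+ *		#interrupt-cells = <2>;
+ *	};
+ */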
+static const struct of_device_id msm8909_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8909-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8909_pinctrl_of_match);
+
+static struct platform_driver msm8909_pinctrl_driver = {
+ .driver = {
+ .name = "msm8909-pinctrl",
+ .of_match_table = msm8909_pinctrl_of_match,
+ },
+ .probe = msm8909_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8909_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8909_pinctrl_driver);
+}
+arch_initcall(msm8909_pinctrl_init);
+
+static void __exit msm8909_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8909_pinctrl_driver);
+}
+module_exit(msm8909_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm MSM8909 TLMM pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 396db12ae904..bf68913ba821 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = {
PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
index 2add9a4520c2..d615b6c55b89 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -141,7 +141,6 @@ static const struct lpi_pinctrl_variant_data sc7280_lpi_data = {
.ngroups = ARRAY_SIZE(sc7280_groups),
.functions = sc7280_functions,
.nfunctions = ARRAY_SIZE(sc7280_functions),
- .is_clk_optional = true,
};
static const struct of_device_id lpi_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 6bec7f143134..704a99d2f93c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
DECLARE_MSM_GPIO_PINS(188);
DECLARE_MSM_GPIO_PINS(189);
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
enum sc8180x_functions {
msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
- { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+ { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
{ 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
{ 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
{ 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
new file mode 100644
index 000000000000..1138e683e6f4
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -0,0 +1,1544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
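+/*
+ * FUNCTION() ties an msm_mux_* enum entry to its name string and to the
+ * matching *_groups[] array via token pasting.
+ */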
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x100000
+#define REG_SIZE 0x1000
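+/*
+ * Each GPIO owns a REG_SIZE (0x1000) register window; the ctl/io/intr
+ * offsets in PINGROUP() below index into that per-pin window.
+ */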
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
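+/*
+ * SDC pad groups only expose pull and drive-strength controls; the -1
+ * sentinels mark the mux, I/O and interrupt facilities as absent.
+ */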
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
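+/*
+ * The UFS reset pad is a simple output: it has an io register and out bit
+ * but no mux or interrupt controls (again marked with -1 sentinels).
+ */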
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm6375_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "UFS_RESET"),
+ PINCTRL_PIN(157, "SDC1_RCLK"),
+ PINCTRL_PIN(158, "SDC1_CLK"),
+ PINCTRL_PIN(159, "SDC1_CMD"),
+ PINCTRL_PIN(160, "SDC1_DATA"),
+ PINCTRL_PIN(161, "SDC2_CLK"),
+ PINCTRL_PIN(162, "SDC2_CMD"),
+ PINCTRL_PIN(163, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+static const unsigned int sdc1_rclk_pins[] = { 157 };
+static const unsigned int sdc1_clk_pins[] = { 158 };
+static const unsigned int sdc1_cmd_pins[] = { 159 };
+static const unsigned int sdc1_data_pins[] = { 160 };
+static const unsigned int sdc2_clk_pins[] = { 161 };
+static const unsigned int sdc2_cmd_pins[] = { 162 };
+static const unsigned int sdc2_data_pins[] = { 163 };
+static const unsigned int ufs_reset_pins[] = { 156 };
+
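+/*
+ * All selectable pad functions; the trailing msm_mux__ entry backs the "_"
+ * placeholder used for unpopulated slots in the PINGROUP() table below.
+ */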
+enum sm6375_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb20,
+ msm_mux_atest_usb21,
+ msm_mux_atest_usb22,
+ msm_mux_atest_usb23,
+ msm_mux_audio_ref,
+ msm_mux_btfm_slimbus,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gp_pdm0,
+ msm_mux_gp_pdm1,
+ msm_mux_gp_pdm2,
+ msm_mux_gpio,
+ msm_mux_gps_tx,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_ext,
+ msm_mux_m_voc,
+ msm_mux_mclk,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s_0,
+ msm_mux_mi2s_1,
+ msm_mux_mi2s_2,
+ msm_mux_mss_lte,
+ msm_mux_nav_gpio,
+ msm_mux_nav_pps,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag0,
+ msm_mux_phase_flag1,
+ msm_mux_phase_flag10,
+ msm_mux_phase_flag11,
+ msm_mux_phase_flag12,
+ msm_mux_phase_flag13,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_phase_flag16,
+ msm_mux_phase_flag17,
+ msm_mux_phase_flag18,
+ msm_mux_phase_flag19,
+ msm_mux_phase_flag2,
+ msm_mux_phase_flag20,
+ msm_mux_phase_flag21,
+ msm_mux_phase_flag22,
+ msm_mux_phase_flag23,
+ msm_mux_phase_flag24,
+ msm_mux_phase_flag25,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag3,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_phase_flag4,
+ msm_mux_phase_flag5,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag7,
+ msm_mux_phase_flag8,
+ msm_mux_phase_flag9,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_gpio0,
+ msm_mux_qdss_gpio1,
+ msm_mux_qdss_gpio10,
+ msm_mux_qdss_gpio11,
+ msm_mux_qdss_gpio12,
+ msm_mux_qdss_gpio13,
+ msm_mux_qdss_gpio14,
+ msm_mux_qdss_gpio15,
+ msm_mux_qdss_gpio2,
+ msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio4,
+ msm_mux_qdss_gpio5,
+ msm_mux_qdss_gpio6,
+ msm_mux_qdss_gpio7,
+ msm_mux_qdss_gpio8,
+ msm_mux_qdss_gpio9,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup10,
+ msm_mux_qup11_f1,
+ msm_mux_qup11_f2,
+ msm_mux_qup12,
+ msm_mux_qup13_f1,
+ msm_mux_qup13_f2,
+ msm_mux_qup14,
+ msm_mux_sd_write,
+ msm_mux_sdc1_tb,
+ msm_mux_sdc2_tb,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux_wlan2_adc0,
+ msm_mux_wlan2_adc1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+ "gpio51", "gpio52", "gpio53", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68", "gpio69", "gpio75", "gpio76", "gpio77", "gpio78",
+ "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85",
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", "gpio92",
+ "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+ "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio119", "gpio120", "gpio124", "gpio125", "gpio126",
+ "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", "gpio132",
+ "gpio133", "gpio134", "gpio135", "gpio136", "gpio141", "gpio142",
+ "gpio143", "gpio150", "gpio151", "gpio152", "gpio153", "gpio154",
+ "gpio155",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio89",
+};
+static const char * const cci_async_groups[] = {
+ "gpio35", "gpio36", "gpio48", "gpio52", "gpio53",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio2", "gpio3", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44",
+};
+static const char * const gps_tx_groups[] = {
+ "gpio101", "gpio102", "gpio107", "gpio108",
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio37", "gpio68",
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio8", "gpio52",
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio57",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio90",
+};
+static const char * const mclk_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio6", "gpio23", "gpio24", "gpio27", "gpio28",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio65", "gpio66",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio101", "gpio101", "gpio102", "gpio102",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio27",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio103",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio106",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio124",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio87",
+};
+static const char * const atest_char_groups[] = {
+ "gpio95",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio96",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio97",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio98",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio99",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio92",
+};
+static const char * const atest_tsens2_groups[] = {
+ "gpio93",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio83",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio84",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio85",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio86",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio87",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio88",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio89",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio90",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio91",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio92",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio60",
+};
+static const char * const btfm_slimbus_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio34",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio35",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio36",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio37",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio0", "gpio1", "gpio2",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio86", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio87", "gpio91",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio88", "gpio92",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio89", "gpio93",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio12", "gpio118",
+};
+static const char * const edp_lcd_groups[] = {
+ "gpio23",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio48", "gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio21",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio22",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio95",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio96",
+};
+static const char * const lpass_ext_groups[] = {
+ "gpio60", "gpio93",
+};
+static const char * const m_voc_groups[] = {
+ "gpio12",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio47",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio48",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio56",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio57",
+};
+static const char * const mi2s_0_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+static const char * const mi2s_1_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const mi2s_2_groups[] = {
+ "gpio60",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio101", "gpio102",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio118",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio12",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio17",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio41",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio42",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio43",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio44",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio45",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio46",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio47",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio48",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio49",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio50",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio18",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio51",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio52",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio56",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio60",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio61",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio63",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio64",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio34",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio67",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio68",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio35",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio36",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio37",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio38",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio39",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio40",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio13",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio98",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio14",
+};
+static const char * const prng_rosc0_groups[] = {
+ "gpio97",
+};
+static const char * const prng_rosc1_groups[] = {
+ "gpio98",
+};
+static const char * const prng_rosc2_groups[] = {
+ "gpio99",
+};
+static const char * const prng_rosc3_groups[] = {
+ "gpio100",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio3", "gpio6", "gpio7", "gpio61", "gpio62", "gpio86",
+ "gpio87",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio8", "gpio9", "gpio63", "gpio64",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio39", "gpio65",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio40", "gpio66",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio50", "gpio56",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio51", "gpio57",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio34", "gpio52",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio35", "gpio53",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio27", "gpio36",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio28", "gpio37",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio38", "gpio41",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio42", "gpio47",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio43", "gpio88",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio44", "gpio89",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio45", "gpio90",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio46", "gpio91",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio48", "gpio92",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio49", "gpio93",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio105",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio104",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio108",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio107",
+};
+static const char * const qup00_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup01_groups[] = {
+ "gpio61", "gpio62", "gpio63", "gpio64",
+};
+static const char * const qup02_groups[] = {
+ "gpio45", "gpio46", "gpio48", "gpio56", "gpio57",
+};
+static const char * const qup10_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup11_f1_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup11_f2_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup12_groups[] = {
+ "gpio19", "gpio19", "gpio20", "gpio20",
+};
+static const char * const qup13_f1_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup13_f2_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup14_groups[] = {
+ "gpio4", "gpio4", "gpio5", "gpio5",
+};
+static const char * const sd_write_groups[] = {
+ "gpio85",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio4",
+};
+static const char * const sdc2_tb_groups[] = {
+ "gpio5",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio3",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio61",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio62",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio63",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio64",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio88",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio88",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio80",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio79",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio82",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio81",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio76",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio75",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio78",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio77",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio47",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio49",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio89",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio90",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio92",
+};
+static const char * const wlan2_adc0_groups[] = {
+ "gpio91",
+};
+static const char * const wlan2_adc1_groups[] = {
+ "gpio93",
+};
+
+static const struct msm_function sm6375_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb20),
+ FUNCTION(atest_usb21),
+ FUNCTION(atest_usb22),
+ FUNCTION(atest_usb23),
+ FUNCTION(audio_ref),
+ FUNCTION(btfm_slimbus),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gpio),
+ FUNCTION(gps_tx),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(lpass_ext),
+ FUNCTION(m_voc),
+ FUNCTION(mclk),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s_0),
+ FUNCTION(mi2s_1),
+ FUNCTION(mi2s_2),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_gpio),
+ FUNCTION(nav_pps),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag0),
+ FUNCTION(phase_flag1),
+ FUNCTION(phase_flag10),
+ FUNCTION(phase_flag11),
+ FUNCTION(phase_flag12),
+ FUNCTION(phase_flag13),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(phase_flag16),
+ FUNCTION(phase_flag17),
+ FUNCTION(phase_flag18),
+ FUNCTION(phase_flag19),
+ FUNCTION(phase_flag2),
+ FUNCTION(phase_flag20),
+ FUNCTION(phase_flag21),
+ FUNCTION(phase_flag22),
+ FUNCTION(phase_flag23),
+ FUNCTION(phase_flag24),
+ FUNCTION(phase_flag25),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag3),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(phase_flag4),
+ FUNCTION(phase_flag5),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag7),
+ FUNCTION(phase_flag8),
+ FUNCTION(phase_flag9),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_gpio0),
+ FUNCTION(qdss_gpio1),
+ FUNCTION(qdss_gpio10),
+ FUNCTION(qdss_gpio11),
+ FUNCTION(qdss_gpio12),
+ FUNCTION(qdss_gpio13),
+ FUNCTION(qdss_gpio14),
+ FUNCTION(qdss_gpio15),
+ FUNCTION(qdss_gpio2),
+ FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(qdss_gpio6),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup10),
+ FUNCTION(qup11_f1),
+ FUNCTION(qup11_f2),
+ FUNCTION(qup12),
+ FUNCTION(qup13_f1),
+ FUNCTION(qup13_f2),
+ FUNCTION(qup14),
+ FUNCTION(sd_write),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sdc2_tb),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(wlan2_adc1),
+};
+
+/*
+ * Every pin is maintained as a single group; a missing or non-existing pin
+ * is kept as a dummy group so that the pin group index stays in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm6375_groups[] = {
+ [0] = PINGROUP(0, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [1] = PINGROUP(1, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup00, cci_i2c, cri_trng, qdss_cti, _, _, _, _, _),
+ [3] = PINGROUP(3, qup00, cci_i2c, sp_cmu, dbg_out, qdss_cti, _, _, _, _),
+ [4] = PINGROUP(4, qup14, qup14, sdc1_tb, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup14, qup14, sdc2_tb, _, _, _, _, _, _),
+ [6] = PINGROUP(6, mdp_vsync, qdss_cti, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qdss_cti, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, gp_pdm1, qdss_gpio, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qdss_gpio, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, m_voc, dp_hot, _, phase_flag0, _, _, _, _, _),
+ [13] = PINGROUP(13, qup10, pll_bypassnl, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup10, pll_reset, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup10, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup10, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, _, phase_flag1, qup10, _, _, _, _, _, _),
+ [18] = PINGROUP(18, _, phase_flag2, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [21] = PINGROUP(21, gcc_gp2, ddr_bist, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, gcc_gp3, ddr_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, mdp_vsync, edp_lcd, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup11_f1, qup11_f2, mdp_vsync, pll_bist, _, qdss_gpio14, _, _, _),
+ [28] = PINGROUP(28, qup11_f1, qup11_f2, mdp_vsync, _, qdss_gpio15, _, _, _, _),
+ [29] = PINGROUP(29, cam_mclk, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, cam_mclk, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, cam_mclk, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, cam_mclk, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, cam_mclk, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, cci_timer0, _, phase_flag3, qdss_gpio12, _, _, _, _, _),
+ [35] = PINGROUP(35, cci_timer1, cci_async, _, phase_flag4, qdss_gpio13, _, _, _, _),
+ [36] = PINGROUP(36, cci_timer2, cci_async, _, phase_flag5, qdss_gpio14, _, _, _, _),
+ [37] = PINGROUP(37, cci_timer3, gp_pdm0, _, phase_flag6, qdss_gpio15, _, _, _, _),
+ [38] = PINGROUP(38, cci_timer4, _, phase_flag7, qdss_gpio2, _, _, _, _, _),
+ [39] = PINGROUP(39, cci_i2c, _, phase_flag8, qdss_gpio0, _, _, _, _, _),
+ [40] = PINGROUP(40, cci_i2c, _, phase_flag9, qdss_gpio1, _, _, _, _, _),
+ [41] = PINGROUP(41, cci_i2c, _, phase_flag10, qdss_gpio2, _, _, _, _, _),
+ [42] = PINGROUP(42, cci_i2c, _, phase_flag11, qdss_gpio3, _, _, _, _, _),
+ [43] = PINGROUP(43, cci_i2c, _, phase_flag12, qdss_gpio4, _, _, _, _, _),
+ [44] = PINGROUP(44, cci_i2c, _, phase_flag13, qdss_gpio5, _, _, _, _, _),
+ [45] = PINGROUP(45, qup02, _, phase_flag14, qdss_gpio6, _, _, _, _, _),
+ [46] = PINGROUP(46, qup02, _, phase_flag15, qdss_gpio7, _, _, _, _, _),
+ [47] = PINGROUP(47, mdp_vsync0, _, phase_flag16, qdss_gpio3, _, _, usb2phy_ac, _, _),
+ [48] = PINGROUP(48, cci_async, mdp_vsync1, gcc_gp1, _, phase_flag17, qdss_gpio8, qup02,
+ _, _),
+ [49] = PINGROUP(49, vfr_1, _, phase_flag18, qdss_gpio9, _, _, _, _, _),
+ [50] = PINGROUP(50, _, phase_flag19, qdss_gpio10, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, phase_flag20, qdss_gpio11, _, _, _, _, _, _),
+ [52] = PINGROUP(52, cci_async, gp_pdm1, _, phase_flag21, qdss_gpio12, _, _, _, _),
+ [53] = PINGROUP(53, cci_async, _, phase_flag22, qdss_gpio13, _, _, _, _, _),
+ [54] = PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup02, mdp_vsync2, _, phase_flag23, qdss_gpio10, _, _, _, _),
+ [57] = PINGROUP(57, qup02, mdp_vsync3, gp_pdm2, _, phase_flag24, qdss_gpio11, _, _, _),
+ [58] = PINGROUP(58, gcc_gp1, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, audio_ref, lpass_ext, mi2s_2, _, phase_flag25, _, _, _, _),
+ [61] = PINGROUP(61, qup01, tgu_ch0, _, phase_flag26, qdss_cti, _, _, _, _),
+ [62] = PINGROUP(62, qup01, tgu_ch1, _, phase_flag27, qdss_cti, _, _, _, _),
+ [63] = PINGROUP(63, qup01, tgu_ch2, _, phase_flag28, qdss_gpio, _, _, _, _),
+ [64] = PINGROUP(64, qup01, tgu_ch3, _, phase_flag29, qdss_gpio, _, _, _, _),
+ [65] = PINGROUP(65, mss_lte, _, qdss_gpio0, _, _, _, _, _, _),
+ [66] = PINGROUP(66, mss_lte, _, qdss_gpio1, _, _, _, _, _, _),
+ [67] = PINGROUP(67, btfm_slimbus, mi2s_1, _, phase_flag30, _, _, _, _, _),
+ [68] = PINGROUP(68, btfm_slimbus, mi2s_1, gp_pdm0, _, phase_flag31, _, _, _, _),
+ [69] = PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, uim2_data, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, uim2_clk, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, uim2_reset, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, uim2_present, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, uim1_data, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, uim1_clk, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, uim1_reset, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, uim1_present, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, atest_usb1, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, _, atest_usb10, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, sd_write, _, atest_usb11, _, _, _, _, _, _),
+ [86] = PINGROUP(86, btfm_slimbus, mi2s_1, _, qdss_cti, atest_usb12, ddr_pxi0, _, _, _),
+ [87] = PINGROUP(87, btfm_slimbus, mi2s_1, adsp_ext, _, qdss_cti, atest_usb13, ddr_pxi1, _,
+ _),
+ [88] = PINGROUP(88, mi2s_0, _, qdss_gpio4, _, atest_usb2, ddr_pxi2, tsense_pwm1,
+ tsense_pwm2, _),
+ [89] = PINGROUP(89, mi2s_0, agera_pll, _, qdss_gpio5, _, vsense_trigger, atest_usb20,
+ ddr_pxi3, _),
+ [90] = PINGROUP(90, mi2s_0, jitter_bist, _, qdss_gpio6, _, wlan1_adc0, atest_usb21,
+ ddr_pxi0, _),
+ [91] = PINGROUP(91, mi2s_0, _, qdss_gpio7, _, wlan2_adc0, atest_usb22, ddr_pxi1, _, _),
+ [92] = PINGROUP(92, _, qdss_gpio8, atest_tsens, wlan1_adc1, atest_usb23, ddr_pxi2, _, _,
+ _),
+ [93] = PINGROUP(93, mclk, lpass_ext, _, qdss_gpio9, atest_tsens2, wlan2_adc1, ddr_pxi3,
+ _, _),
+ [94] = PINGROUP(94, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, ldo_en, _, atest_char, _, _, _, _, _, _),
+ [96] = PINGROUP(96, ldo_update, _, atest_char0, _, _, _, _, _, _),
+ [97] = PINGROUP(97, prng_rosc0, _, atest_char1, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, atest_char2, _, _, prng_rosc1, pll_clk, _, _, _),
+ [99] = PINGROUP(99, _, atest_char3, _, _, prng_rosc2, _, _, _, _),
+ [100] = PINGROUP(100, _, _, prng_rosc3, _, _, _, _, _, _),
+ [101] = PINGROUP(101, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [102] = PINGROUP(102, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [103] = PINGROUP(103, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, qlink0_request, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, qlink0_enable, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qlink1_request, gps_tx, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qlink1_enable, gps_tx, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, pa_indicator, dp_hot, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb_phy, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = UFS_RESET(ufs_reset, 0x1ae000),
+ [157] = SDC_PINGROUP(sdc1_rclk, 0x1a1000, 0, 0),
+ [158] = SDC_PINGROUP(sdc1_clk, 0x1a0000, 13, 6),
+ [159] = SDC_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3),
+ [160] = SDC_PINGROUP(sdc1_data, 0x1a0000, 9, 0),
+ [161] = SDC_PINGROUP(sdc2_clk, 0x1a2000, 14, 6),
+ [162] = SDC_PINGROUP(sdc2_cmd, 0x1a2000, 11, 3),
+ [163] = SDC_PINGROUP(sdc2_data, 0x1a2000, 9, 0),
+};
+
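+/* Each entry pairs a wake-capable TLMM GPIO with its MPM wake interrupt: { gpio, wakeirq } */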
+static const struct msm_gpio_wakeirq_map sm6375_mpm_map[] = {
+ { 0, 84 }, { 3, 6 }, { 4, 7 }, { 7, 8 }, { 8, 9 }, { 9, 10 }, { 11, 11 }, { 12, 13 },
+ { 13, 14 }, { 16, 16 }, { 17, 17 }, { 18, 18 }, { 19, 19 }, { 21, 20 }, { 22, 21 },
+ { 23, 23 }, { 24, 24 }, { 25, 25 }, { 27, 26 }, { 28, 27 }, { 37, 28 }, { 38, 29 },
+ { 48, 30 }, { 50, 31 }, { 51, 32 }, { 52, 33 }, { 57, 34 }, { 59, 35 }, { 60, 37 },
+ { 61, 38 }, { 62, 39 }, { 64, 40 }, { 66, 41 }, { 67, 42 }, { 68, 43 }, { 69, 44 },
+ { 78, 45 }, { 82, 36 }, { 83, 47 }, { 84, 48 }, { 85, 49 }, { 87, 50 }, { 88, 51 },
+ { 91, 52 }, { 94, 53 }, { 95, 54 }, { 96, 55 }, { 97, 56 }, { 98, 57 }, { 99, 58 },
+ { 100, 59 }, { 104, 60 }, { 107, 61 }, { 118, 62 }, { 124, 63 }, { 125, 64 }, { 126, 65 },
+ { 128, 66 }, { 129, 67 }, { 131, 69 }, { 133, 70 }, { 134, 71 }, { 136, 73 }, { 142, 74 },
+ { 150, 75 }, { 153, 76 }, { 155, 77 },
+};
+
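+/* Ties the pin, group and function tables together for the shared MSM pinctrl core */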
+static const struct msm_pinctrl_soc_data sm6375_tlmm = {
+ .pins = sm6375_pins,
+ .npins = ARRAY_SIZE(sm6375_pins),
+ .functions = sm6375_functions,
+ .nfunctions = ARRAY_SIZE(sm6375_functions),
+ .groups = sm6375_groups,
+ .ngroups = ARRAY_SIZE(sm6375_groups),
+ .ngpios = 157,
+ .wakeirq_map = sm6375_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6375_mpm_map),
+};
+
+static int sm6375_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm6375_tlmm);
+}
+
+static const struct of_device_id sm6375_tlmm_of_match[] = {
+ { .compatible = "qcom,sm6375-tlmm", },
+ { },
+};
+
+static struct platform_driver sm6375_tlmm_driver = {
+ .driver = {
+ .name = "sm6375-tlmm",
+ .of_match_table = sm6375_tlmm_of_match,
+ },
+ .probe = sm6375_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
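+/* Registered at arch_initcall so pin states are ready before dependent drivers probe */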
+static int __init sm6375_tlmm_init(void)
+{
+ return platform_driver_register(&sm6375_tlmm_driver);
+}
+arch_initcall(sm6375_tlmm_init);
+
+static void __exit sm6375_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm6375_tlmm_driver);
+}
+module_exit(sm6375_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM6375 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm6375_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index af144e724bd9..3bd7f9fedcc3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1316,7 +1316,7 @@ static const struct msm_pingroup sm8250_groups[] = {
static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
{ 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
{ 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
- { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 },
{ 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
{ 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
{ 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 3be2a08ae3a6..ccaf40a9c0e6 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1159,6 +1159,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmc8180c-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8226-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
@@ -1175,6 +1176,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
+ /* pmp8074 has 12 GPIOs with holes on 1 and 12 */
+ { .compatible = "qcom,pmp8074-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
/* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 961007ce7b3a..0903a0a41831 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,7 +38,9 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
+ select PINCTRL_PFC_R8A779G0 if ARCH_R8A779G0
select PINCTRL_RZG2L if ARCH_RZG2L
+ select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -153,6 +155,10 @@ config PINCTRL_PFC_R8A779A0
bool "pin control support for R-Car V3U" if COMPILE_TEST
select PINCTRL_SH_PFC

+config PINCTRL_PFC_R8A779G0
+ bool "pin control support for R-Car V4H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
@@ -237,6 +243,18 @@ config PINCTRL_RZN1
help
This selects pinctrl driver for Renesas RZ/N1 devices.

+config PINCTRL_RZV2M
+ bool "pin control support for RZ/V2M"
+ depends on OF
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects GPIO and pinctrl driver for Renesas RZ/V2M
+ platforms.
+
config PINCTRL_PFC_SH7203
bool "pin control support for SH7203" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 5d936c154a6f..558b30ce0dec 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779F0) += pfc-r8a779f0.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779G0) += pfc-r8a779g0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+obj-$(CONFIG_PINCTRL_RZV2M) += pinctrl-rzv2m.o

ifeq ($(CONFIG_COMPILE_TEST),y)
CFLAGS_pfc-sh7203.o += -I$(srctree)/arch/sh/include/cpu-sh2a
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 8c14b2021bf0..c91102d3f1d1 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -644,6 +644,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a779f0_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779G0
+ {
+ .compatible = "renesas,pfc-r8a779g0",
+ .data = &r8a779g0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
index aaca4ee2af55..417c357f16b1 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
@@ -1902,7 +1902,6 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POC0,
POC1,
- POC2,
POC3,
TD0SEL1,
};
@@ -1910,7 +1909,6 @@ enum ioctrl_regs {
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POC0] = { 0xe60500a0, },
[POC1] = { 0xe60508a0, },
- [POC2] = { 0xe60510a0, },
[POC3] = { 0xe60518a0, },
[TD0SEL1] = { 0xe6050920, },
{ /* sentinel */ },
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
new file mode 100644
index 000000000000..5dd1c2c7708a
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -0,0 +1,4262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779G0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a779a0.c
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
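+/* Describes all GPIO banks (0-8); IO_VOLTAGE_18_33 marks pads that can switch between 1.8V and 3.3V */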
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_20(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_25(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33)
+
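+/* F_() names the function carried when the GPSR bit is set, plus the IPSR field that selects it; FM() marks fixed, single-function pins */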
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD7_RX, IP2SR2_15_12)
+#define GPSR2_18 F_(CANFD7_TX, IP2SR2_11_8)
+#define GPSR2_17 F_(CANFD4_RX, IP2SR2_7_4)
+#define GPSR2_16 F_(CANFD4_TX, IP2SR2_3_0)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(IPC_CLKOUT, IP1SR3_27_24)
+#define GPSR3_13 F_(IPC_CLKIN, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
+/* GPSR4 */
+#define GPSR4_24 FM(AVS1)
+#define GPSR4_23 FM(AVS0)
+#define GPSR4_22 FM(PCIE1_CLKREQ_N)
+#define GPSR4_21 FM(PCIE0_CLKREQ_N)
+#define GPSR4_20 FM(TSN0_TXCREFCLK)
+#define GPSR4_19 FM(TSN0_TD2)
+#define GPSR4_18 FM(TSN0_TD3)
+#define GPSR4_17 FM(TSN0_RD2)
+#define GPSR4_16 FM(TSN0_RD3)
+#define GPSR4_15 FM(TSN0_TD0)
+#define GPSR4_14 FM(TSN0_TD1)
+#define GPSR4_13 FM(TSN0_RD1)
+#define GPSR4_12 FM(TSN0_TXC)
+#define GPSR4_11 FM(TSN0_RXC)
+#define GPSR4_10 FM(TSN0_RD0)
+#define GPSR4_9 FM(TSN0_TX_CTL)
+#define GPSR4_8 FM(TSN0_AVTP_PPS0)
+#define GPSR4_7 FM(TSN0_RX_CTL)
+#define GPSR4_6 FM(TSN0_AVTP_CAPTURE)
+#define GPSR4_5 FM(TSN0_AVTP_MATCH)
+#define GPSR4_4 FM(TSN0_LINK)
+#define GPSR4_3 FM(TSN0_PHY_INT)
+#define GPSR4_2 FM(TSN0_AVTP_PPS1)
+#define GPSR4_1 FM(TSN0_MDC)
+#define GPSR4_0 FM(TSN0_MDIO)
+
+/* GPSR5 */
+#define GPSR5_20 FM(AVB2_RX_CTL)
+#define GPSR5_19 FM(AVB2_TX_CTL)
+#define GPSR5_18 FM(AVB2_RXC)
+#define GPSR5_17 FM(AVB2_RD0)
+#define GPSR5_16 FM(AVB2_TXC)
+#define GPSR5_15 FM(AVB2_TD0)
+#define GPSR5_14 FM(AVB2_RD1)
+#define GPSR5_13 FM(AVB2_RD2)
+#define GPSR5_12 FM(AVB2_TD1)
+#define GPSR5_11 FM(AVB2_TD2)
+#define GPSR5_10 FM(AVB2_MDIO)
+#define GPSR5_9 FM(AVB2_RD3)
+#define GPSR5_8 FM(AVB2_TD3)
+#define GPSR5_7 FM(AVB2_TXCREFCLK)
+#define GPSR5_6 FM(AVB2_MDC)
+#define GPSR5_5 FM(AVB2_MAGIC)
+#define GPSR5_4 FM(AVB2_PHY_INT)
+#define GPSR5_3 FM(AVB2_LINK)
+#define GPSR5_2 FM(AVB2_AVTP_MATCH)
+#define GPSR5_1 FM(AVB2_AVTP_CAPTURE)
+#define GPSR5_0 FM(AVB2_AVTP_PPS)
+
+/* GPSR6 */
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+
+/* GPSR7 */
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+
+/* GPSR8 */
+#define GPSR8_13 F_(GP8_13, IP1SR8_23_20)
+#define GPSR8_12 F_(GP8_12, IP1SR8_19_16)
+#define GPSR8_11 F_(SDA5, IP1SR8_15_12)
+#define GPSR8_10 F_(SCL5, IP1SR8_11_8)
+#define GPSR8_9 F_(SDA4, IP1SR8_7_4)
+#define GPSR8_8 F_(SCL4, IP1SR8_3_0)
+#define GPSR8_7 F_(SDA3, IP0SR8_31_28)
+#define GPSR8_6 F_(SCL3, IP0SR8_27_24)
+#define GPSR8_5 F_(SDA2, IP0SR8_23_20)
+#define GPSR8_4 F_(SCL2, IP0SR8_19_16)
+#define GPSR8_3 F_(SDA1, IP0SR8_15_12)
+#define GPSR8_2 F_(SCL1, IP0SR8_11_8)
+#define GPSR8_1 F_(SDA0, IP0SR8_7_4)
+#define GPSR8_0 F_(SCL0, IP0SR8_3_0)
+
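+/* Each 4-bit IPnSRm field picks one of up to 16 functions for its pin; columns 0-F list the candidates */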
+/* SR0 */
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR1 */
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR2 */
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR3 */
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR6 */
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR7 */
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR8 */
+/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
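+/* One column per GPSR bank (0-8), one row per bit position, highest-numbered bits first */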
+#define PINMUX_GPSR \
+ GPSR3_29 \
+ GPSR1_28 GPSR3_28 \
+ GPSR1_27 GPSR3_27 \
+ GPSR1_26 GPSR3_26 \
+ GPSR1_25 GPSR3_25 \
+ GPSR1_24 GPSR3_24 GPSR4_24 \
+ GPSR1_23 GPSR3_23 GPSR4_23 \
+ GPSR1_22 GPSR3_22 GPSR4_22 \
+ GPSR1_21 GPSR3_21 GPSR4_21 \
+ GPSR1_20 GPSR3_20 GPSR4_20 GPSR5_20 GPSR6_20 GPSR7_20 \
+ GPSR1_19 GPSR2_19 GPSR3_19 GPSR4_19 GPSR5_19 GPSR6_19 GPSR7_19 \
+GPSR0_18 GPSR1_18 GPSR2_18 GPSR3_18 GPSR4_18 GPSR5_18 GPSR6_18 GPSR7_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR4_17 GPSR5_17 GPSR6_17 GPSR7_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR3_16 GPSR4_16 GPSR5_16 GPSR6_16 GPSR7_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 GPSR8_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 GPSR8_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 GPSR8_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 GPSR8_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 GPSR8_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 GPSR8_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 GPSR8_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 GPSR8_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 GPSR8_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 GPSR8_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 GPSR8_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 GPSR8_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 GPSR8_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0 GPSR8_0
+
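+/* Expanded repeatedly with different FM()/F_() definitions to build the pinmux enums and data tables */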
+#define PINMUX_IPSR \
+\
+FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \
+FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \
+FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
+FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \
+FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 FM(IP2SR2_11_8) IP2SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 \
+\
+FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
+FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
+FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
+FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \
+FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \
+FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \
+FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \
+FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \
+\
+FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \
+FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \
+FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \
+FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \
+FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \
+FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \
+FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \
+FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \
+\
+FM(IP0SR8_3_0) IP0SR8_3_0 FM(IP1SR8_3_0) IP1SR8_3_0 \
+FM(IP0SR8_7_4) IP0SR8_7_4 FM(IP1SR8_7_4) IP1SR8_7_4 \
+FM(IP0SR8_11_8) IP0SR8_11_8 FM(IP1SR8_11_8) IP1SR8_11_8 \
+FM(IP0SR8_15_12) IP0SR8_15_12 FM(IP1SR8_15_12) IP1SR8_15_12 \
+FM(IP0SR8_19_16) IP0SR8_19_16 FM(IP1SR8_19_16) IP1SR8_19_16 \
+FM(IP0SR8_23_20) IP0SR8_23_20 FM(IP1SR8_23_20) IP1SR8_23_20 \
+FM(IP0SR8_27_24) IP0SR8_27_24 \
+FM(IP0SR8_31_28) IP0SR8_31_28
+
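+/*
+ * Module select bits: each MOD_SELn_m macro below names the two
+ * functions selectable by bit m of module select register n; the
+ * "0" column is listed first, the "1" column second.
+ */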
+/* MOD_SEL4 */ /* 0 */ /* 1 */
+#define MOD_SEL4_19 FM(SEL_TSN0_TD2_0) FM(SEL_TSN0_TD2_1)
+#define MOD_SEL4_18 FM(SEL_TSN0_TD3_0) FM(SEL_TSN0_TD3_1)
+#define MOD_SEL4_15 FM(SEL_TSN0_TD0_0) FM(SEL_TSN0_TD0_1)
+#define MOD_SEL4_14 FM(SEL_TSN0_TD1_0) FM(SEL_TSN0_TD1_1)
+#define MOD_SEL4_12 FM(SEL_TSN0_TXC_0) FM(SEL_TSN0_TXC_1)
+#define MOD_SEL4_9 FM(SEL_TSN0_TX_CTL_0) FM(SEL_TSN0_TX_CTL_1)
+#define MOD_SEL4_8 FM(SEL_TSN0_AVTP_PPS0_0) FM(SEL_TSN0_AVTP_PPS0_1)
+#define MOD_SEL4_5 FM(SEL_TSN0_AVTP_MATCH_0) FM(SEL_TSN0_AVTP_MATCH_1)
+#define MOD_SEL4_2 FM(SEL_TSN0_AVTP_PPS1_0) FM(SEL_TSN0_AVTP_PPS1_1)
+#define MOD_SEL4_1 FM(SEL_TSN0_MDC_0) FM(SEL_TSN0_MDC_1)
+
+/* MOD_SEL5 */ /* 0 */ /* 1 */
+#define MOD_SEL5_19 FM(SEL_AVB2_TX_CTL_0) FM(SEL_AVB2_TX_CTL_1)
+#define MOD_SEL5_16 FM(SEL_AVB2_TXC_0) FM(SEL_AVB2_TXC_1)
+#define MOD_SEL5_15 FM(SEL_AVB2_TD0_0) FM(SEL_AVB2_TD0_1)
+#define MOD_SEL5_12 FM(SEL_AVB2_TD1_0) FM(SEL_AVB2_TD1_1)
+#define MOD_SEL5_11 FM(SEL_AVB2_TD2_0) FM(SEL_AVB2_TD2_1)
+#define MOD_SEL5_8 FM(SEL_AVB2_TD3_0) FM(SEL_AVB2_TD3_1)
+#define MOD_SEL5_6 FM(SEL_AVB2_MDC_0) FM(SEL_AVB2_MDC_1)
+#define MOD_SEL5_5 FM(SEL_AVB2_MAGIC_0) FM(SEL_AVB2_MAGIC_1)
+#define MOD_SEL5_2 FM(SEL_AVB2_AVTP_MATCH_0) FM(SEL_AVB2_AVTP_MATCH_1)
+#define MOD_SEL5_0 FM(SEL_AVB2_AVTP_PPS_0) FM(SEL_AVB2_AVTP_PPS_1)
+
+/* MOD_SEL6 */ /* 0 */ /* 1 */
+#define MOD_SEL6_18 FM(SEL_AVB1_TD3_0) FM(SEL_AVB1_TD3_1)
+#define MOD_SEL6_16 FM(SEL_AVB1_TD2_0) FM(SEL_AVB1_TD2_1)
+#define MOD_SEL6_13 FM(SEL_AVB1_TD0_0) FM(SEL_AVB1_TD0_1)
+#define MOD_SEL6_12 FM(SEL_AVB1_TD1_0) FM(SEL_AVB1_TD1_1)
+#define MOD_SEL6_10 FM(SEL_AVB1_AVTP_PPS_0) FM(SEL_AVB1_AVTP_PPS_1)
+#define MOD_SEL6_7 FM(SEL_AVB1_TX_CTL_0) FM(SEL_AVB1_TX_CTL_1)
+#define MOD_SEL6_6 FM(SEL_AVB1_TXC_0) FM(SEL_AVB1_TXC_1)
+#define MOD_SEL6_5 FM(SEL_AVB1_AVTP_MATCH_0) FM(SEL_AVB1_AVTP_MATCH_1)
+#define MOD_SEL6_2 FM(SEL_AVB1_MDC_0) FM(SEL_AVB1_MDC_1)
+#define MOD_SEL6_1 FM(SEL_AVB1_MAGIC_0) FM(SEL_AVB1_MAGIC_1)
+
+/* MOD_SEL7 */ /* 0 */ /* 1 */
+#define MOD_SEL7_16 FM(SEL_AVB0_TX_CTL_0) FM(SEL_AVB0_TX_CTL_1)
+#define MOD_SEL7_15 FM(SEL_AVB0_TXC_0) FM(SEL_AVB0_TXC_1)
+#define MOD_SEL7_13 FM(SEL_AVB0_MDC_0) FM(SEL_AVB0_MDC_1)
+#define MOD_SEL7_11 FM(SEL_AVB0_TD0_0) FM(SEL_AVB0_TD0_1)
+#define MOD_SEL7_10 FM(SEL_AVB0_MAGIC_0) FM(SEL_AVB0_MAGIC_1)
+#define MOD_SEL7_7 FM(SEL_AVB0_TD1_0) FM(SEL_AVB0_TD1_1)
+#define MOD_SEL7_6 FM(SEL_AVB0_TD2_0) FM(SEL_AVB0_TD2_1)
+#define MOD_SEL7_3 FM(SEL_AVB0_TD3_0) FM(SEL_AVB0_TD3_1)
+#define MOD_SEL7_2 FM(SEL_AVB0_AVTP_MATCH_0) FM(SEL_AVB0_AVTP_MATCH_1)
+#define MOD_SEL7_0 FM(SEL_AVB0_AVTP_PPS_0) FM(SEL_AVB0_AVTP_PPS_1)
+
+/* MOD_SEL8 */ /* 0 */ /* 1 */
+#define MOD_SEL8_11 FM(SEL_SDA5_0) FM(SEL_SDA5_1)
+#define MOD_SEL8_10 FM(SEL_SCL5_0) FM(SEL_SCL5_1)
+#define MOD_SEL8_9 FM(SEL_SDA4_0) FM(SEL_SDA4_1)
+#define MOD_SEL8_8 FM(SEL_SCL4_0) FM(SEL_SCL4_1)
+#define MOD_SEL8_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1)
+#define MOD_SEL8_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1)
+#define MOD_SEL8_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1)
+#define MOD_SEL8_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1)
+#define MOD_SEL8_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1)
+#define MOD_SEL8_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1)
+#define MOD_SEL8_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1)
+#define MOD_SEL8_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL4_19 MOD_SEL5_19 \
+MOD_SEL4_18 MOD_SEL6_18 \
+ \
+ MOD_SEL5_16 MOD_SEL6_16 MOD_SEL7_16 \
+MOD_SEL4_15 MOD_SEL5_15 MOD_SEL7_15 \
+MOD_SEL4_14 \
+ MOD_SEL6_13 MOD_SEL7_13 \
+MOD_SEL4_12 MOD_SEL5_12 MOD_SEL6_12 \
+ MOD_SEL5_11 MOD_SEL7_11 MOD_SEL8_11 \
+ MOD_SEL6_10 MOD_SEL7_10 MOD_SEL8_10 \
+MOD_SEL4_9 MOD_SEL8_9 \
+MOD_SEL4_8 MOD_SEL5_8 MOD_SEL8_8 \
+ MOD_SEL6_7 MOD_SEL7_7 MOD_SEL8_7 \
+ MOD_SEL5_6 MOD_SEL6_6 MOD_SEL7_6 MOD_SEL8_6 \
+MOD_SEL4_5 MOD_SEL5_5 MOD_SEL6_5 MOD_SEL8_5 \
+ MOD_SEL8_4 \
+ MOD_SEL7_3 MOD_SEL8_3 \
+MOD_SEL4_2 MOD_SEL5_2 MOD_SEL6_2 MOD_SEL7_2 MOD_SEL8_2 \
+MOD_SEL4_1 MOD_SEL6_1 MOD_SEL8_1 \
+ MOD_SEL5_0 MOD_SEL7_0 MOD_SEL8_0
+
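+/*
+ * Expand the same macro lists twice: FM() is first defined as
+ * FN_##x to emit function selector enum values, then redefined as
+ * x##_MARK to emit the matching mark values consumed by
+ * pinmux_data[] below.
+ */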
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
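+/*
+ * Mark-to-register mapping: PINMUX_SINGLE() covers pins with a
+ * single dedicated function, PINMUX_IPSR_GPSR() functions selected
+ * by an IPSR field alone, PINMUX_IPSR_MSEL() functions additionally
+ * qualified by a MOD_SEL bit, and PINMUX_IPSR_NOGM() MOD_SEL-gated
+ * functions without a GPSR association (see sh_pfc.h).
+ */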
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS0),
+ PINMUX_SINGLE(PCIE1_CLKREQ_N),
+ PINMUX_SINGLE(PCIE0_CLKREQ_N),
+
+ /* TSN0 without MODSEL4 */
+ PINMUX_SINGLE(TSN0_TXCREFCLK),
+ PINMUX_SINGLE(TSN0_RD2),
+ PINMUX_SINGLE(TSN0_RD3),
+ PINMUX_SINGLE(TSN0_RD1),
+ PINMUX_SINGLE(TSN0_RXC),
+ PINMUX_SINGLE(TSN0_RD0),
+ PINMUX_SINGLE(TSN0_RX_CTL),
+ PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
+ PINMUX_SINGLE(TSN0_LINK),
+ PINMUX_SINGLE(TSN0_PHY_INT),
+ PINMUX_SINGLE(TSN0_MDIO),
+ /* TSN0 with MODSEL4 */
+ PINMUX_IPSR_NOGM(0, TSN0_TD2, SEL_TSN0_TD2_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD3, SEL_TSN0_TD3_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD0, SEL_TSN0_TD0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD1, SEL_TSN0_TD1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TXC, SEL_TSN0_TXC_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TX_CTL, SEL_TSN0_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0, SEL_TSN0_AVTP_PPS0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH, SEL_TSN0_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1, SEL_TSN0_AVTP_PPS1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_MDC, SEL_TSN0_MDC_1),
+
+	/* AVB2 without MODSEL5 */
+ PINMUX_SINGLE(AVB2_RX_CTL),
+ PINMUX_SINGLE(AVB2_RXC),
+ PINMUX_SINGLE(AVB2_RD0),
+ PINMUX_SINGLE(AVB2_RD1),
+ PINMUX_SINGLE(AVB2_RD2),
+ PINMUX_SINGLE(AVB2_MDIO),
+ PINMUX_SINGLE(AVB2_RD3),
+ PINMUX_SINGLE(AVB2_TXCREFCLK),
+ PINMUX_SINGLE(AVB2_PHY_INT),
+ PINMUX_SINGLE(AVB2_LINK),
+ PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+	/* AVB2 with MODSEL5 */
+ PINMUX_IPSR_NOGM(0, AVB2_TX_CTL, SEL_AVB2_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TXC, SEL_AVB2_TXC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD0, SEL_AVB2_TD0_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD1, SEL_AVB2_TD1_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD2, SEL_AVB2_TD2_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD3, SEL_AVB2_TD3_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MDC, SEL_AVB2_MDC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MAGIC, SEL_AVB2_MAGIC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH, SEL_AVB2_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS, SEL_AVB2_AVTP_PPS_1),
+
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+
+ /* IP1SR0 */
+ PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC),
+
+ PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK),
+
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B),
+
+ /* IP1SR2 */
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, CANFD4_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, PWM4),
+
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD4_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM5),
+
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, CANFD7_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, PWM6),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD7_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM7),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1),
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0),
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2),
+ PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK),
+ PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS),
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3),
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5),
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_A),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+ /* IP2SR3 */
+ PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3),
+ PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1),
+ PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1),
+
+ /* IP3SR3 */
+ PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2),
+ PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL),
+ PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3),
+ PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N),
+ PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+ PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+ /* IP0SR6 */
+ PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+ PINMUX_IPSR_MSEL(IP0SR6_7_4, AVB1_MAGIC, SEL_AVB1_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP0SR6_11_8, AVB1_MDC, SEL_AVB1_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_MII_RX_ER, SEL_AVB1_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_TXC, SEL_AVB1_TXC_1),
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_MII_TXC, SEL_AVB1_TXC_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_TX_CTL, SEL_AVB1_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_MII_TX_EN, SEL_AVB1_TX_CTL_0),
+
+ /* IP1SR6 */
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_AVTP_PPS, SEL_AVB1_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_MII_COL, SEL_AVB1_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_TD1, SEL_AVB1_TD1_1),
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_MII_TD1, SEL_AVB1_TD1_0),
+
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_TD0, SEL_AVB1_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_MII_TD0, SEL_AVB1_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+ /* IP2SR6 */
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_TD2, SEL_AVB1_TD2_1),
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_MII_TD2, SEL_AVB1_TD2_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_TD3, SEL_AVB1_TD3_1),
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_MII_TD3, SEL_AVB1_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+ /* IP0SR7 */
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_AVTP_PPS, SEL_AVB0_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_MII_COL, SEL_AVB0_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_AVTP_MATCH, SEL_AVB0_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_MII_RX_ER, SEL_AVB0_AVTP_MATCH_0),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, CC5_OSCOUT, SEL_AVB0_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_TD3, SEL_AVB0_TD3_1),
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_MII_TD3, SEL_AVB0_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_TD2, SEL_AVB0_TD2_1),
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_MII_TD2, SEL_AVB0_TD2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_TD1, SEL_AVB0_TD1_1),
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_MII_TD1, SEL_AVB0_TD1_0),
+
+ /* IP1SR7 */
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_MSEL(IP1SR7_11_8, AVB0_MAGIC, SEL_AVB0_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_TD0, SEL_AVB0_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_MII_TD0, SEL_AVB0_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP1SR7_23_20, AVB0_MDC, SEL_AVB0_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_TXC, SEL_AVB0_TXC_1),
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_MII_TXC, SEL_AVB0_TXC_0),
+
+ /* IP2SR7 */
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_TX_CTL, SEL_AVB0_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_MII_TX_EN, SEL_AVB0_TX_CTL_0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+
+ /* IP0SR8 */
+ PINMUX_IPSR_MSEL(IP0SR8_3_0, SCL0, SEL_SCL0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_7_4, SDA0, SEL_SDA0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_11_8, SCL1, SEL_SCL1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_15_12, SDA1, SEL_SDA1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_19_16, SCL2, SEL_SCL2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_23_20, SDA2, SEL_SDA2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_27_24, SCL3, SEL_SCL3_0),
+ PINMUX_IPSR_MSEL(IP0SR8_31_28, SDA3, SEL_SDA3_0),
+
+ /* IP1SR8 */
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCL4, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, HRX2, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCK4, SEL_SCL4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, SDA4, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, HTX2, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, CTS4_N, SEL_SDA4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, SCL5, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, HRTS2_N, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, RTS4_N, SEL_SCL5_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SDA5, SEL_SDA5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SCIF_CLK2, SEL_SDA5_0),
+
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, HCTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, TX4),
+
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, HSCK2),
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, RX4),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
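+/*
+ * Pin groups: each <group>_pins[] array lists physical pins as
+ * RCAR_GP_PIN(bank, bit), and the matching <group>_mux[] array lists
+ * the corresponding marks in the same order; SH_PFC_PIN_GROUP()
+ * pairs the two by name in pinmux_groups[] below.
+ */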
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK,
+ AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK,
+ AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK,
+ AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK,
+ AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK,
+ AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK,
+ AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD4 ----------------------------------------------------------------- */
+static const unsigned int canfd4_data_pins[] = {
+ /* CANFD4_TX, CANFD4_RX */
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int canfd4_data_mux[] = {
+ CANFD4_TX_MARK, CANFD4_RX_MARK,
+};
+
+/* - CANFD5 ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_pins[] = {
+ /* CANFD5_TX, CANFD5_RX */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int canfd5_data_mux[] = {
+ CANFD5_TX_MARK, CANFD5_RX_MARK,
+};
+
+/* - CANFD5_B ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_b_pins[] = {
+ /* CANFD5_TX_B, CANFD5_RX_B */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int canfd5_data_b_mux[] = {
+ CANFD5_TX_B_MARK, CANFD5_RX_B_MARK,
+};
+
+/* - CANFD6 ----------------------------------------------------------------- */
+static const unsigned int canfd6_data_pins[] = {
+ /* CANFD6_TX, CANFD6_RX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int canfd6_data_mux[] = {
+ CANFD6_TX_MARK, CANFD6_RX_MARK,
+};
+
+/* - CANFD7 ----------------------------------------------------------------- */
+static const unsigned int canfd7_data_pins[] = {
+ /* CANFD7_TX, CANFD7_RX */
+ RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd7_data_mux[] = {
+ CANFD7_TX_MARK, CANFD7_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0_N, HCTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* HRX1, HTX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_mux[] = {
+ HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* HSCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* HRTS1_N, HCTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HRTS1_N_MARK, HCTS1_N_MARK,
+};
+
+/* - HSCIF1_X ---------------------------------------------------------------- */
+static const unsigned int hscif1_data_x_pins[] = {
+ /* HRX1_X, HTX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_x_mux[] = {
+ HRX1_X_MARK, HTX1_X_MARK,
+};
+static const unsigned int hscif1_clk_x_pins[] = {
+ /* HSCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_x_mux[] = {
+ HSCK1_X_MARK,
+};
+static const unsigned int hscif1_ctrl_x_pins[] = {
+ /* HRTS1_N_X, HCTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_x_mux[] = {
+ HRTS1_N_X_MARK, HCTS1_N_X_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(8, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2_N, HCTS2_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_pins[] = {
+ /* HRX3, HTX3 */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_mux[] = {
+ HRX3_MARK, HTX3_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* HSCK3 */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* HRTS3_N, HCTS3_N */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 0),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 2),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 4),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 6),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+ /* SDA4, SCL4 */
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 8),
+};
+static const unsigned int i2c4_mux[] = {
+ SDA4_MARK, SCL4_MARK,
+};
+
+/* - I2C5 ------------------------------------------------------------------- */
+static const unsigned int i2c5_pins[] = {
+ /* SDA5, SCL5 */
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 10),
+};
+static const unsigned int i2c5_mux[] = {
+ SDA5_MARK, SCL5_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+	/* PCIE0_CLKREQ_N */
+	RCAR_GP_PIN(4, 21),
+};
+static const unsigned int pcie0_clkreq_n_mux[] = {
+	PCIE0_CLKREQ_N_MARK,
+};
+
+static const unsigned int pcie1_clkreq_n_pins[] = {
+	/* PCIE1_CLKREQ_N */
+	RCAR_GP_PIN(4, 22),
+};
+static const unsigned int pcie1_clkreq_n_mux[] = {
+	PCIE1_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM0_A */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM1_A */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM1_B */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM2_B */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM3_A */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM3_B */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - PWM5 ------------------------------------------------------------------- */
+static const unsigned int pwm5_pins[] = {
+ /* PWM5 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm5_mux[] = {
+ PWM5_MARK,
+};
+
+/* - PWM6 ------------------------------------------------------------------- */
+static const unsigned int pwm6_pins[] = {
+ /* PWM6 */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int pwm6_mux[] = {
+ PWM6_MARK,
+};
+
+/* - PWM7 ------------------------------------------------------------------- */
+static const unsigned int pwm7_pins[] = {
+ /* PWM7 */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm7_mux[] = {
+ PWM7_MARK,
+};
+
+/* - PWM8_A ------------------------------------------------------------------- */
+static const unsigned int pwm8_a_pins[] = {
+ /* PWM8_A */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int pwm8_a_mux[] = {
+ PWM8_A_MARK,
+};
+
+/* - PWM9_A ------------------------------------------------------------------- */
+static const unsigned int pwm9_a_pins[] = {
+ /* PWM9_A */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm9_a_mux[] = {
+ PWM9_A_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0_N, CTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX1, TX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS1_N, CTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF1_X ------------------------------------------------------------------ */
+static const unsigned int scif1_data_x_pins[] = {
+ /* RX1_X, TX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_x_mux[] = {
+ RX1_X_MARK, TX1_X_MARK,
+};
+static const unsigned int scif1_clk_x_pins[] = {
+ /* SCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_x_mux[] = {
+ SCK1_X_MARK,
+};
+static const unsigned int scif1_ctrl_x_pins[] = {
+ /* RTS1_N_X, CTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_x_mux[] = {
+ RTS1_N_X_MARK, CTS1_N_X_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX3, TX3 */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK3 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+ /* RTS3_N, CTS3_N */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+ RTS3_N_MARK, CTS3_N_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK3_A */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS3_N_A, CTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(8, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4_N, CTS4_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+ /* TPU0TO0 */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_mux[] = {
+ TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+ /* TPU0TO1 */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_mux[] = {
+ TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+ /* TPU0TO2 */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_mux[] = {
+ TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+ /* TPU0TO3 */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_mux[] = {
+ TPU0TO3_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
+};
+
+/* - TSN0 ------------------------------------------------ */
+static const unsigned int tsn0_link_pins[] = {
+ /* TSN0_LINK */
+ RCAR_GP_PIN(4, 4),
+};
+static const unsigned int tsn0_link_mux[] = {
+ TSN0_LINK_MARK,
+};
+static const unsigned int tsn0_phy_int_pins[] = {
+ /* TSN0_PHY_INT */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int tsn0_phy_int_mux[] = {
+ TSN0_PHY_INT_MARK,
+};
+static const unsigned int tsn0_mdio_pins[] = {
+ /* TSN0_MDC, TSN0_MDIO */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int tsn0_mdio_mux[] = {
+ TSN0_MDC_MARK, TSN0_MDIO_MARK,
+};
+static const unsigned int tsn0_rgmii_pins[] = {
+ /*
+ * TSN0_TX_CTL, TSN0_TXC, TSN0_TD0, TSN0_TD1, TSN0_TD2, TSN0_TD3,
+ * TSN0_RX_CTL, TSN0_RXC, TSN0_RD0, TSN0_RD1, TSN0_RD2, TSN0_RD3,
+ */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 11),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int tsn0_rgmii_mux[] = {
+ TSN0_TX_CTL_MARK, TSN0_TXC_MARK,
+ TSN0_TD0_MARK, TSN0_TD1_MARK,
+ TSN0_TD2_MARK, TSN0_TD3_MARK,
+ TSN0_RX_CTL_MARK, TSN0_RXC_MARK,
+ TSN0_RD0_MARK, TSN0_RD1_MARK,
+ TSN0_RD2_MARK, TSN0_RD3_MARK,
+};
+static const unsigned int tsn0_txcrefclk_pins[] = {
+ /* TSN0_TXCREFCLK */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int tsn0_txcrefclk_mux[] = {
+ TSN0_TXCREFCLK_MARK,
+};
+static const unsigned int tsn0_avtp_pps_pins[] = {
+ /* TSN0_AVTP_PPS0, TSN0_AVTP_PPS1 */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int tsn0_avtp_pps_mux[] = {
+ TSN0_AVTP_PPS0_MARK, TSN0_AVTP_PPS1_MARK,
+};
+static const unsigned int tsn0_avtp_capture_pins[] = {
+ /* TSN0_AVTP_CAPTURE */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int tsn0_avtp_capture_mux[] = {
+ TSN0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int tsn0_avtp_match_pins[] = {
+ /* TSN0_AVTP_MATCH */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int tsn0_avtp_match_mux[] = {
+ TSN0_AVTP_MATCH_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(canfd4_data),
+ SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd6_data),
+ SH_PFC_PIN_GROUP(canfd7_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+
+ BUS_DATA_PIN_GROUP(mmc_data, 1),
+ BUS_DATA_PIN_GROUP(mmc_data, 4),
+ BUS_DATA_PIN_GROUP(mmc_data, 8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+ SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+
+ SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(pwm7),
+ SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+ BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ BUS_DATA_PIN_GROUP(qspi1_data, 2),
+ BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+
+ SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(tsn0_link),
+ SH_PFC_PIN_GROUP(tsn0_phy_int),
+ SH_PFC_PIN_GROUP(tsn0_mdio),
+ SH_PFC_PIN_GROUP(tsn0_rgmii),
+ SH_PFC_PIN_GROUP(tsn0_txcrefclk),
+ SH_PFC_PIN_GROUP(tsn0_avtp_pps),
+ SH_PFC_PIN_GROUP(tsn0_avtp_capture),
+ SH_PFC_PIN_GROUP(tsn0_avtp_match),
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const canfd4_groups[] = {
+ "canfd4_data",
+};
+
+static const char * const canfd5_groups[] = {
+ /* suffix might be updated */
+ "canfd5_data",
+ "canfd5_data_b",
+};
+
+static const char * const canfd6_groups[] = {
+ "canfd6_data",
+};
+
+static const char * const canfd7_groups[] = {
+ "canfd7_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ /* suffix might be updated */
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+ "hscif1_data_x",
+ "hscif1_clk_x",
+ "hscif1_ctrl_x",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ /* suffix might be updated */
+ "hscif3_data",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+ "pcie0_clkreq_n",
+ "pcie1_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+ /* suffix might be updated */
+ "pwm0_a",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ /* suffix might be updated */
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6",
+};
+
+static const char * const pwm7_groups[] = {
+ "pwm7",
+};
+
+static const char * const pwm8_groups[] = {
+ /* suffix might be updated */
+ "pwm8_a",
+};
+
+static const char * const pwm9_groups[] = {
+ /* suffix might be updated */
+ "pwm9_a",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ /* suffix might be updated */
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+ "scif1_data_x",
+ "scif1_clk_x",
+ "scif1_ctrl_x",
+};
+
+static const char * const scif3_groups[] = {
+ /* suffix might be updated */
+ "scif3_data",
+ "scif3_clk",
+ "scif3_ctrl",
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const tpu_groups[] = {
+ /* suffix might be updated */
+ "tpu_to0",
+ "tpu_to0_a",
+ "tpu_to1",
+ "tpu_to1_a",
+ "tpu_to2",
+ "tpu_to2_a",
+ "tpu_to3",
+ "tpu_to3_a",
+};
+
+static const char * const tsn0_groups[] = {
+ "tsn0_link",
+ "tsn0_phy_int",
+ "tsn0_mdio",
+ "tsn0_rgmii",
+ "tsn0_txcrefclk",
+ "tsn0_avtp_pps",
+ "tsn0_avtp_capture",
+ "tsn0_avtp_match",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(canfd4),
+ SH_PFC_FUNCTION(canfd5),
+ SH_PFC_FUNCTION(canfd6),
+ SH_PFC_FUNCTION(canfd7),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pcie),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(pwm7),
+ SH_PFC_FUNCTION(pwm8),
+ SH_PFC_FUNCTION(pwm9),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+
+ SH_PFC_FUNCTION(tpu),
+
+ SH_PFC_FUNCTION(tsn0),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
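+ /* In the GROUP() field lists below, a negative width denotes that many reserved bits. */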
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_19 RESERVED */
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+ GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_20 RESERVED */
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ GP_3_29_FN, GPSR3_29,
+ GP_3_28_FN, GPSR3_28,
+ GP_3_27_FN, GPSR3_27,
+ GP_3_26_FN, GPSR3_26,
+ GP_3_25_FN, GPSR3_25,
+ GP_3_24_FN, GPSR3_24,
+ GP_3_23_FN, GPSR3_23,
+ GP_3_22_FN, GPSR3_22,
+ GP_3_21_FN, GPSR3_21,
+ GP_3_20_FN, GPSR3_20,
+ GP_3_19_FN, GPSR3_19,
+ GP_3_18_FN, GPSR3_18,
+ GP_3_17_FN, GPSR3_17,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060040, 32, 1, GROUP(
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ GP_4_22_FN, GPSR4_22,
+ GP_4_21_FN, GPSR4_21,
+ GP_4_20_FN, GPSR4_20,
+ GP_4_19_FN, GPSR4_19,
+ GP_4_18_FN, GPSR4_18,
+ GP_4_17_FN, GPSR4_17,
+ GP_4_16_FN, GPSR4_16,
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR8", 0xE6068040, 32,
+ GROUP(-18, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP8_31_14 RESERVED */
+ GP_8_13_FN, GPSR8_13,
+ GP_8_12_FN, GPSR8_12,
+ GP_8_11_FN, GPSR8_11,
+ GP_8_10_FN, GPSR8_10,
+ GP_8_9_FN, GPSR8_9,
+ GP_8_8_FN, GPSR8_8,
+ GP_8_7_FN, GPSR8_7,
+ GP_8_6_FN, GPSR8_6,
+ GP_8_5_FN, GPSR8_5,
+ GP_8_4_FN, GPSR8_4,
+ GP_8_3_FN, GPSR8_3,
+ GP_8_2_FN, GPSR8_2,
+ GP_8_1_FN, GPSR8_1,
+ GP_8_0_FN, GPSR8_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+ IP0SR0_31_28
+ IP0SR0_27_24
+ IP0SR0_23_20
+ IP0SR0_19_16
+ IP0SR0_15_12
+ IP0SR0_11_8
+ IP0SR0_7_4
+ IP0SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+ IP1SR0_31_28
+ IP1SR0_27_24
+ IP1SR0_23_20
+ IP1SR0_19_16
+ IP1SR0_15_12
+ IP1SR0_11_8
+ IP1SR0_7_4
+ IP1SR0_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+ GROUP(-20, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_12 RESERVED */
+ IP2SR0_11_8
+ IP2SR0_7_4
+ IP2SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_20 RESERVED */
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+ GROUP(-16, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR2_31_16 RESERVED */
+ IP2SR2_15_12
+ IP2SR2_11_8
+ IP2SR2_7_4
+ IP2SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+ IP2SR3_31_28
+ IP2SR3_27_24
+ IP2SR3_23_20
+ IP2SR3_19_16
+ IP2SR3_15_12
+ IP2SR3_11_8
+ IP2SR3_7_4
+ IP2SR3_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR3", 0xE605886C, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR3_31_24 RESERVED */
+ IP3SR3_23_20
+ IP3SR3_19_16
+ IP3SR3_15_12
+ IP3SR3_11_8
+ IP3SR3_7_4
+ IP3SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ IP0SR6_31_28
+ IP0SR6_27_24
+ IP0SR6_23_20
+ IP0SR6_19_16
+ IP0SR6_15_12
+ IP0SR6_11_8
+ IP0SR6_7_4
+ IP0SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+ IP1SR6_31_28
+ IP1SR6_27_24
+ IP1SR6_23_20
+ IP1SR6_19_16
+ IP1SR6_15_12
+ IP1SR6_11_8
+ IP1SR6_7_4
+ IP1SR6_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR6_31_20 RESERVED */
+ IP2SR6_19_16
+ IP2SR6_15_12
+ IP2SR6_11_8
+ IP2SR6_7_4
+ IP2SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+ IP0SR7_31_28
+ IP0SR7_27_24
+ IP0SR7_23_20
+ IP0SR7_19_16
+ IP0SR7_15_12
+ IP0SR7_11_8
+ IP0SR7_7_4
+ IP0SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+ IP1SR7_31_28
+ IP1SR7_27_24
+ IP1SR7_23_20
+ IP1SR7_19_16
+ IP1SR7_15_12
+ IP1SR7_11_8
+ IP1SR7_7_4
+ IP1SR7_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR7_31_20 RESERVED */
+ IP2SR7_19_16
+ IP2SR7_15_12
+ IP2SR7_11_8
+ IP2SR7_7_4
+ IP2SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR8", 0xE6068060, 32, 4, GROUP(
+ IP0SR8_31_28
+ IP0SR8_27_24
+ IP0SR8_23_20
+ IP0SR8_19_16
+ IP0SR8_15_12
+ IP0SR8_11_8
+ IP0SR8_7_4
+ IP0SR8_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP1SR8", 0xE6068064, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP1SR8_31_24 RESERVED */
+ IP1SR8_23_20
+ IP1SR8_19_16
+ IP1SR8_15_12
+ IP1SR8_11_8
+ IP1SR8_7_4
+ IP1SR8_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+ GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+ -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL4_19
+ MOD_SEL4_18
+ /* RESERVED 17-16 */
+ MOD_SEL4_15
+ MOD_SEL4_14
+ /* RESERVED 13 */
+ MOD_SEL4_12
+ /* RESERVED 11-10 */
+ MOD_SEL4_9
+ MOD_SEL4_8
+ /* RESERVED 7-6 */
+ MOD_SEL4_5
+ /* RESERVED 4-3 */
+ MOD_SEL4_2
+ MOD_SEL4_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+ GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+ 1, 1, -2, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL5_19
+ /* RESERVED 18-17 */
+ MOD_SEL5_16
+ MOD_SEL5_15
+ /* RESERVED 14-13 */
+ MOD_SEL5_12
+ MOD_SEL5_11
+ /* RESERVED 10-9 */
+ MOD_SEL5_8
+ /* RESERVED 7 */
+ MOD_SEL5_6
+ MOD_SEL5_5
+ /* RESERVED 4-3 */
+ MOD_SEL5_2
+ /* RESERVED 1 */
+ MOD_SEL5_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+ GROUP(-13, 1, -1, 1, -2, 1, 1,
+ -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-19 */
+ MOD_SEL6_18
+ /* RESERVED 17 */
+ MOD_SEL6_16
+ /* RESERVED 15-14 */
+ MOD_SEL6_13
+ MOD_SEL6_12
+ /* RESERVED 11 */
+ MOD_SEL6_10
+ /* RESERVED 9-8 */
+ MOD_SEL6_7
+ MOD_SEL6_6
+ MOD_SEL6_5
+ /* RESERVED 4-3 */
+ MOD_SEL6_2
+ MOD_SEL6_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+ GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+ -2, 1, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-17 */
+ MOD_SEL7_16
+ MOD_SEL7_15
+ /* RESERVED 14 */
+ MOD_SEL7_13
+ /* RESERVED 12 */
+ MOD_SEL7_11
+ MOD_SEL7_10
+ /* RESERVED 9-8 */
+ MOD_SEL7_7
+ MOD_SEL7_6
+ /* RESERVED 5-4 */
+ MOD_SEL7_3
+ MOD_SEL7_2
+ /* RESERVED 1 */
+ MOD_SEL7_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED 31-12 */
+ MOD_SEL8_11
+ MOD_SEL8_10
+ MOD_SEL8_9
+ MOD_SEL8_8
+ MOD_SEL8_7
+ MOD_SEL8_6
+ MOD_SEL8_5
+ MOD_SEL8_4
+ MOD_SEL8_3
+ MOD_SEL8_2
+ MOD_SEL8_1
+ MOD_SEL8_0))
+ },
+ { /* sentinel */ },
+};
+
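+/* Each drive register field below is { pin, bit offset within the register, field width in bits }. */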
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+ { RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+ { RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+ { RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+ { RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+ { RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+ { RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+ { RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+ { RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+ { RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+ { RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD7_RX */
+ { RCAR_GP_PIN(2, 18), 8, 3 }, /* CANFD7_TX */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD4_RX */
+ { RCAR_GP_PIN(2, 16), 0, 3 }, /* CANFD4_TX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(3, 14), 24, 2 }, /* IPC_CLKOUT */
+ { RCAR_GP_PIN(3, 13), 20, 2 }, /* IPC_CLKIN */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* MMC_D6 */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) {
+ { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */
+ { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) {
+ { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* TSN0_RX_CTL */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* TSN0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* TSN0_AVTP_MATCH */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* TSN0_LINK */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* TSN0_PHY_INT */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* TSN0_AVTP_PPS1 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* TSN0_MDC */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* TSN0_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* TSN0_TD0 */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* TSN0_TD1 */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* TSN0_RD1 */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* TSN0_TXC */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* TSN0_RXC */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* TSN0_RD0 */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* TSN0_TX_CTL */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* TSN0_AVTP_PPS0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 22), 24, 3 }, /* PCIE1_CLKREQ_N */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ { RCAR_GP_PIN(4, 20), 16, 3 }, /* TSN0_TXCREFCLK */
+ { RCAR_GP_PIN(4, 19), 12, 3 }, /* TSN0_TD2 */
+ { RCAR_GP_PIN(4, 18), 8, 3 }, /* TSN0_TD3 */
+ { RCAR_GP_PIN(4, 17), 4, 3 }, /* TSN0_RD2 */
+ { RCAR_GP_PIN(4, 16), 0, 3 }, /* TSN0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) {
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL8", 0xE6068080) {
+ { RCAR_GP_PIN(8, 7), 28, 3 }, /* SDA3 */
+ { RCAR_GP_PIN(8, 6), 24, 3 }, /* SCL3 */
+ { RCAR_GP_PIN(8, 5), 20, 3 }, /* SDA2 */
+ { RCAR_GP_PIN(8, 4), 16, 3 }, /* SCL2 */
+ { RCAR_GP_PIN(8, 3), 12, 3 }, /* SDA1 */
+ { RCAR_GP_PIN(8, 2), 8, 3 }, /* SCL1 */
+ { RCAR_GP_PIN(8, 1), 4, 3 }, /* SDA0 */
+ { RCAR_GP_PIN(8, 0), 0, 3 }, /* SCL0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL8", 0xE6068084) {
+ { RCAR_GP_PIN(8, 13), 20, 3 }, /* GP8_13 */
+ { RCAR_GP_PIN(8, 12), 16, 3 }, /* GP8_12 */
+ { RCAR_GP_PIN(8, 11), 12, 3 }, /* SDA5 */
+ { RCAR_GP_PIN(8, 10), 8, 3 }, /* SCL5 */
+ { RCAR_GP_PIN(8, 9), 4, 3 }, /* SDA4 */
+ { RCAR_GP_PIN(8, 8), 0, 3 }, /* SCL4 */
+ } },
+ { /* sentinel */ },
+};
+
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC3,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+ POC8,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xE60500A0, },
+ [POC1] = { 0xE60508A0, },
+ [POC3] = { 0xE60588A0, },
+ [POC4] = { 0xE60600A0, },
+ [POC5] = { 0xE60608A0, },
+ [POC6] = { 0xE60610A0, },
+ [POC7] = { 0xE60618A0, },
+ [POC8] = { 0xE60680A0, },
+ { /* sentinel */ },
+};
+
+static int r8a779g0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl)
+{
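+ /*
+ * RCAR_GP_PIN(bank, pin) encodes bank * 32 + pin, so the low five bits
+ * give the bit position within the bank's POC register.
+ */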
+ int bit = pin & 0x1f;
+
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 18))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ if (pin >= RCAR_GP_PIN(1, 0) && pin <= RCAR_GP_PIN(1, 22))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC3].reg;
+ if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 12))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC8].reg;
+ if (pin >= RCAR_GP_PIN(8, 0) && pin <= RCAR_GP_PIN(8, 13))
+ return bit;
+
+ return -EINVAL;
+}
+
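+/* The array index is the bit position used in both the pull-enable (PUEN) and pull-up/down (PUD) registers. */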
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */
+ [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */
+ [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */
+ [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */
+ [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */
+ [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */
+ [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */
+ [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */
+ [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */
+ [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */
+ [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */
+ [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */
+ [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */
+ [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */
+ [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */
+ [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */
+ [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */
+ [12] = RCAR_GP_PIN(1, 12), /* HTX0 */
+ [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */
+ [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */
+ [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */
+ [16] = RCAR_GP_PIN(1, 16), /* HRX0 */
+ [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */
+ [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */
+ [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */
+ [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */
+ [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */
+ [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */
+ [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */
+ [24] = RCAR_GP_PIN(1, 24), /* HRX3 */
+ [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */
+ [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */
+ [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */
+ [28] = RCAR_GP_PIN(1, 28), /* HTX3 */
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */
+ [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */
+ [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */
+ [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */
+ [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */
+ [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */
+ [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */
+ [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */
+ [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */
+ [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */
+ [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */
+ [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */
+ [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */
+ [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */
+ [16] = RCAR_GP_PIN(2, 16), /* CANFD4_TX */
+ [17] = RCAR_GP_PIN(2, 17), /* CANFD4_RX */
+ [18] = RCAR_GP_PIN(2, 18), /* CANFD7_TX */
+ [19] = RCAR_GP_PIN(2, 19), /* CANFD7_RX */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */
+ [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */
+ [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */
+ [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */
+ [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */
+ [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */
+ [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */
+ [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */
+ [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */
+ [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */
+ [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */
+ [11] = RCAR_GP_PIN(3, 11), /* SD_CD */
+ [12] = RCAR_GP_PIN(3, 12), /* SD_WP */
+ [13] = RCAR_GP_PIN(3, 13), /* IPC_CLKIN */
+ [14] = RCAR_GP_PIN(3, 14), /* IPC_CLKOUT */
+ [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */
+ [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */
+ [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */
+ [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */
+ [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */
+ [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */
+ [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */
+ [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */
+ [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */
+ [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */
+ [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */
+ [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */
+ [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */
+ [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */
+ [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* TSN0_MDIO */
+ [ 1] = RCAR_GP_PIN(4, 1), /* TSN0_MDC */
+ [ 2] = RCAR_GP_PIN(4, 2), /* TSN0_AVTP_PPS1 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* TSN0_PHY_INT */
+ [ 4] = RCAR_GP_PIN(4, 4), /* TSN0_LINK */
+ [ 5] = RCAR_GP_PIN(4, 5), /* TSN0_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(4, 6), /* TSN0_AVTP_CAPTURE */
+ [ 7] = RCAR_GP_PIN(4, 7), /* TSN0_RX_CTL */
+ [ 8] = RCAR_GP_PIN(4, 8), /* TSN0_AVTP_PPS0 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* TSN0_TX_CTL */
+ [10] = RCAR_GP_PIN(4, 10), /* TSN0_RD0 */
+ [11] = RCAR_GP_PIN(4, 11), /* TSN0_RXC */
+ [12] = RCAR_GP_PIN(4, 12), /* TSN0_TXC */
+ [13] = RCAR_GP_PIN(4, 13), /* TSN0_RD1 */
+ [14] = RCAR_GP_PIN(4, 14), /* TSN0_TD1 */
+ [15] = RCAR_GP_PIN(4, 15), /* TSN0_TD0 */
+ [16] = RCAR_GP_PIN(4, 16), /* TSN0_RD3 */
+ [17] = RCAR_GP_PIN(4, 17), /* TSN0_RD2 */
+ [18] = RCAR_GP_PIN(4, 18), /* TSN0_TD3 */
+ [19] = RCAR_GP_PIN(4, 19), /* TSN0_TD2 */
+ [20] = RCAR_GP_PIN(4, 20), /* TSN0_TXCREFCLK */
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = RCAR_GP_PIN(4, 22), /* PCIE1_CLKREQ_N */
+ [23] = RCAR_GP_PIN(4, 23), /* AVS0 */
+ [24] = RCAR_GP_PIN(4, 24), /* AVS1 */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(5, 1), /* AVB2_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */
+ [14] = RCAR_GP_PIN(6, 14), /* AVB1_RD1 */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN8", 0xE60680C0, "PUD8", 0xE60680E0) {
+ [ 0] = RCAR_GP_PIN(8, 0), /* SCL0 */
+ [ 1] = RCAR_GP_PIN(8, 1), /* SDA0 */
+ [ 2] = RCAR_GP_PIN(8, 2), /* SCL1 */
+ [ 3] = RCAR_GP_PIN(8, 3), /* SDA1 */
+ [ 4] = RCAR_GP_PIN(8, 4), /* SCL2 */
+ [ 5] = RCAR_GP_PIN(8, 5), /* SDA2 */
+ [ 6] = RCAR_GP_PIN(8, 6), /* SCL3 */
+ [ 7] = RCAR_GP_PIN(8, 7), /* SDA3 */
+ [ 8] = RCAR_GP_PIN(8, 8), /* SCL4 */
+ [ 9] = RCAR_GP_PIN(8, 9), /* SDA4 */
+ [10] = RCAR_GP_PIN(8, 10), /* SCL5 */
+ [11] = RCAR_GP_PIN(8, 11), /* SDA5 */
+ [12] = RCAR_GP_PIN(8, 12), /* GP8_12 */
+ [13] = RCAR_GP_PIN(8, 13), /* GP8_13 */
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations r8a779g0_pin_ops = {
+ .pin_to_pocctrl = r8a779g0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779g0_pinmux_info = {
+ .name = "r8a779g0_pfc",
+ .ops = &r8a779g0_pin_ops,
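+ /*
+ * PFC registers are write-protected: per the R-Car PFC convention, a
+ * write is unlocked by first writing the inverted value to the matching
+ * PMMRn register, which this mask is used to locate.
+ */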
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c47eed9d948f..a43824fd9505 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -527,6 +527,8 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
+ if (!arg)
+ return -EINVAL;
break;
case PIN_CONFIG_POWER_SOURCE: {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
new file mode 100644
index 000000000000..e8c18198bebd
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2M Pin Control and GPIO driver core
+ *
+ * Based on:
+ * Renesas RZ/G2L Pin Control and GPIO driver core
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/pinctrl/rzv2m-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzv2m"
+
+/*
+ * Use the 16 lower bits [15:0] for the pin identifier and the
+ * 16 higher bits [31:16] for the pin mux function.
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(31, 16)
+#define MUX_FUNC(pinconf) FIELD_GET(MUX_FUNC_MASK, (pinconf))
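+/* e.g. a "pinmux" cell of 0x0002000b requests mux function 2 on pin identifier 11 */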
+
+/* PIN capabilities */
+#define PIN_CFG_GRP_1_8V_2 1
+#define PIN_CFG_GRP_1_8V_3 2
+#define PIN_CFG_GRP_SWIO_1 3
+#define PIN_CFG_GRP_SWIO_2 4
+#define PIN_CFG_GRP_3_3V 5
+#define PIN_CFG_GRP_MASK GENMASK(2, 0)
+#define PIN_CFG_BIAS BIT(3)
+#define PIN_CFG_DRV BIT(4)
+#define PIN_CFG_SLEW BIT(5)
+
+#define RZV2M_MPXED_PIN_FUNCS (PIN_CFG_BIAS | \
+ PIN_CFG_DRV | \
+ PIN_CFG_SLEW)
+
+/*
+ * n indicates the number of pins in the port, a is the register index,
+ * and f is the set of pin configuration capabilities supported.
+ */
+#define RZV2M_GPIO_PORT_PACK(n, a, f) (((n) << 24) | ((a) << 16) | (f))
+#define RZV2M_GPIO_PORT_GET_PINCNT(x) FIELD_GET(GENMASK(31, 24), (x))
+#define RZV2M_GPIO_PORT_GET_INDEX(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_GPIO_PORT_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
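+/* e.g. RZV2M_GPIO_PORT_PACK(14, 0, RZV2M_MPXED_PIN_FUNCS) would describe a 14-pin port at register index 0 (illustrative values) */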
+
+#define RZV2M_DEDICATED_PORT_IDX 22
+
+/*
+ * BIT(31) indicates a dedicated pin, b is the register bits (b * 16),
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZV2M_SINGLE_PIN BIT(31)
+#define RZV2M_SINGLE_PIN_PACK(b, f) (RZV2M_SINGLE_PIN | \
+ ((RZV2M_DEDICATED_PORT_IDX) << 24) | \
+ ((b) << 16) | (f))
+#define RZV2M_SINGLE_PIN_GET_PORT(x) FIELD_GET(GENMASK(30, 24), (x))
+#define RZV2M_SINGLE_PIN_GET_BIT(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_SINGLE_PIN_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
+
+#define RZV2M_PIN_ID_TO_PORT(id) ((id) / RZV2M_PINS_PER_PORT)
+#define RZV2M_PIN_ID_TO_PIN(id) ((id) % RZV2M_PINS_PER_PORT)
+
+#define DO(n) (0x00 + (n) * 0x40)
+#define OE(n) (0x04 + (n) * 0x40)
+#define IE(n) (0x08 + (n) * 0x40)
+#define PFSEL(n) (0x10 + (n) * 0x40)
+#define DI(n) (0x20 + (n) * 0x40)
+#define PUPD(n) (0x24 + (n) * 0x40)
+#define DRV(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x28 + (n) * 0x40) \
+ : 0x590)
+#define SR(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x2c + (n) * 0x40) \
+ : 0x594)
+#define DI_MSK(n) (0x30 + (n) * 0x40)
+#define EN_MSK(n) (0x34 + (n) * 0x40)
+
+#define PFC_MASK 0x07
+#define PUPD_MASK 0x03
+#define DRV_MASK 0x03
+
+struct rzv2m_dedicated_configs {
+ const char *name;
+ u32 config;
+};
+
+struct rzv2m_pinctrl_data {
+ const char * const *port_pins;
+ const u32 *port_pin_configs;
+ const struct rzv2m_dedicated_configs *dedicated_pins;
+ unsigned int n_port_pins;
+ unsigned int n_dedicated_pins;
+};
+
+struct rzv2m_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+
+ const struct rzv2m_pinctrl_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+
+ spinlock_t lock;
+};
+
+static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
+static const unsigned int drv_1_8V_group3_uA[] = { 1600, 3200, 6400, 9600 };
+static const unsigned int drv_SWIO_group2_3_3V_uA[] = { 9000, 11000, 13000, 18000 };
+static const unsigned int drv_3_3V_group_uA[] = { 2000, 4000, 8000, 12000 };
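+/* Selectable drive strengths in microamps; the 2-bit DRV field indexes into the array for the pin's voltage group */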
+
+/* Helper for registers that have a write enable bit in the upper word */
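+/* e.g. shift = 3, value = 1 writes 0x00080008: bit 19 write-enables bit 3 */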
+static void rzv2m_writel_we(void __iomem *addr, u8 shift, u8 value)
+{
+ writel((BIT(16) | value) << shift, addr);
+}
+
+static void rzv2m_pinctrl_set_pfc_mode(struct rzv2m_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ void __iomem *addr;
+
+ /* Mask input/output */
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 1);
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 1);
+
+ /* Select the function and set the write enable bits */
+ addr = pctrl->base + PFSEL(port) + (pin / 4) * 4;
+ writel(((PFC_MASK << 16) | func) << ((pin % 4) * 4), addr);
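+ /* e.g. pin 5, func 2 writes 0x00700020 to PFSEL(port) + 4 */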
+
+ /* Unmask input/output */
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 0);
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 0);
+}
+
+static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ unsigned int i, *psel_val;
+ struct group_desc *group;
+ int *pins;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->pins;
+
+ for (i = 0; i < group->num_pins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
+ RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ rzv2m_pinctrl_set_pfc_mode(pctrl, RZV2M_PIN_ID_TO_PORT(pins[i]),
+ RZV2M_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+}
+
+static int rzv2m_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
+ GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
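+/*
+ * Illustrative DT fragment accepted by the parser below (names and
+ * numbers are made up; the RZV2M_PORT_PINMUX() helper is assumed to
+ * come from the matching dt-bindings header):
+ *
+ *    i2c2-pins {
+ *            pinmux = <RZV2M_PORT_PINMUX(3, 8, 2)>;
+ *    };
+ *    sd0-pins {
+ *            pins = "P8_2", "P8_3";
+ *            slew-rate = <1>;
+ *    };
+ *
+ * A subnode may use either "pinmux" or "pins" (with at least one
+ * generic config), but not both.
+ */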
+static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int *pins, *psel_val;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ const char *pin;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+ "DT node must contain either a pinmux or pins and not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain at least one config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux)
+ nmaps += 1;
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzv2m_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = value & MUX_PIN_ID_MASK;
+ psel_val[i] = MUX_FUNC(value);
+ }
+
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+ /*
+ * Register a single group function where the 'data' is an array of
+ * PSEL register values read from DT.
+ */
+ pin_fn[0] = np->name;
+ fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+ psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = np->name;
+ maps[idx].data.mux.function = np->name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
+
+static void rzv2m_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device_node *child;
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node(np, child) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
+ num_maps, &index);
+ if (ret < 0) {
+ of_node_put(child);
+ goto done;
+ }
+ }
+
+ if (*num_maps == 0) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ if (ret < 0)
+ rzv2m_dt_free_map(pctldev, *map, *num_maps);
+
+ return ret;
+}
+
+static int rzv2m_validate_gpio_pin(struct rzv2m_pinctrl *pctrl,
+ u32 cfg, u32 port, u8 bit)
+{
+ u8 pincount = RZV2M_GPIO_PORT_GET_PINCNT(cfg);
+ u32 port_index = RZV2M_GPIO_PORT_GET_INDEX(cfg);
+ u32 data;
+
+ if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (port_index != RZV2M_GPIO_PORT_GET_INDEX(data))
+ return -EINVAL;
+
+ return 0;
+}
+
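+/*
+ * PUPD and DRV pack 2-bit fields with no write-enable half-word, so
+ * unlike the rzv2m_writel_we() registers they need a locked
+ * read-modify-write.
+ */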
+static void rzv2m_rmw_pin_config(struct rzv2m_pinctrl *pctrl, u32 offset,
+ u8 shift, u32 mask, u32 val)
+{
+ void __iomem *addr = pctrl->base + offset;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << shift);
+ writel(reg | (val << shift), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int rzv2m_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *config)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ unsigned int arg = 0;
+ u32 port;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ enum pin_config_param bias;
+
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
+ switch ((readl(pctrl->base + PUPD(port)) >> bit) & PUPD_MASK) {
+ case 0:
+ bias = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ bias = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ bias = PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ if (bias != param)
+ return -EINVAL;
+ break;
+ }
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ val = (readl(pctrl->base + DRV(port)) >> bit) & DRV_MASK;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ arg = drv_1_8V_group2_uA[val];
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ arg = drv_1_8V_group3_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ arg = drv_SWIO_group2_3_3V_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ arg = drv_3_3V_group_uA[val];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ arg = readl(pctrl->base + SR(port)) & BIT(bit);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *_configs,
+ unsigned int num_configs)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ enum pin_config_param param;
+ u32 port;
+ unsigned int i;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(_configs[i]);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = 2;
+ break;
+ default:
+ val = 1;
+ }
+
+ rzv2m_rmw_pin_config(pctrl, PUPD(port), bit, PUPD_MASK, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ const unsigned int *drv_strengths;
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ drv_strengths = drv_1_8V_group2_uA;
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ drv_strengths = drv_1_8V_group3_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ drv_strengths = drv_SWIO_group2_3_3V_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ drv_strengths = drv_3_3V_group_uA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
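+ /*
+ * Map the requested microamp value onto the 2-bit DRV index; e.g.
+ * 9000 uA on a SWIO group 2 pin selects index 0. Values not in the
+ * table are rejected.
+ */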
+ for (index = 0; index < 4; index++) {
+ if (arg == drv_strengths[index])
+ break;
+ }
+ if (index >= 4)
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ rzv2m_rmw_pin_config(pctrl, DRV(port), bit, DRV_MASK, index);
+ break;
+ }
+
+ case PIN_CONFIG_SLEW_RATE: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ rzv2m_writel_we(pctrl->base + SR(port), bit, !arg);
+ break;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_set(pctldev, pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, prev_config = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_get(pctldev, pins[i], config);
+ if (ret)
+ return ret;
+
+ /* Check config matches previous pins */
+ if (i && prev_config != *config)
+ return -EOPNOTSUPP;
+
+ prev_config = *config;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops rzv2m_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzv2m_dt_node_to_map,
+ .dt_free_map = rzv2m_dt_free_map,
+};
+
+static const struct pinmux_ops rzv2m_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzv2m_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct pinconf_ops rzv2m_pinctrl_confops = {
+ .is_generic = true,
+ .pin_config_get = rzv2m_pinctrl_pinconf_get,
+ .pin_config_set = rzv2m_pinctrl_pinconf_set,
+ .pin_config_group_set = rzv2m_pinctrl_pinconf_group_set,
+ .pin_config_group_get = rzv2m_pinctrl_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int ret;
+
+ ret = pinctrl_gpio_request(chip->base + offset);
+ if (ret)
+ return ret;
+
+ rzv2m_pinctrl_set_pfc_mode(pctrl, port, bit, 0);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set_direction(struct rzv2m_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ rzv2m_writel_we(pctrl->base + OE(port), bit, output);
+ rzv2m_writel_we(pctrl->base + IE(port), bit, !output);
+}
+
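+/* A pin whose input-enable (IE) bit is set is reported as an input. */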
+static int rzv2m_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ if (!(readl(pctrl->base + IE(port)) & BIT(bit)))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rzv2m_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_writel_we(pctrl->base + DO(port), bit, !!value);
+}
+
+static int rzv2m_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set(chip, offset, value);
+ rzv2m_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int direction = rzv2m_gpio_get_direction(chip, offset);
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ return !!(readl(pctrl->base + DI(port)) & BIT(bit));
+ else
+ return !!(readl(pctrl->base + DO(port)) & BIT(bit));
+}
+
+static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip->base + offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzv2m_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzv2m_gpio_names[] = {
+ "P0_0", "P0_1", "P0_2", "P0_3", "P0_4", "P0_5", "P0_6", "P0_7",
+ "P0_8", "P0_9", "P0_10", "P0_11", "P0_12", "P0_13", "P0_14", "P0_15",
+ "P1_0", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P1_7",
+ "P1_8", "P1_9", "P1_10", "P1_11", "P1_12", "P1_13", "P1_14", "P1_15",
+ "P2_0", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6", "P2_7",
+ "P2_8", "P2_9", "P2_10", "P2_11", "P2_12", "P2_13", "P2_14", "P2_15",
+ "P3_0", "P3_1", "P3_2", "P3_3", "P3_4", "P3_5", "P3_6", "P3_7",
+ "P3_8", "P3_9", "P3_10", "P3_11", "P3_12", "P3_13", "P3_14", "P3_15",
+ "P4_0", "P4_1", "P4_2", "P4_3", "P4_4", "P4_5", "P4_6", "P4_7",
+ "P4_8", "P4_9", "P4_10", "P4_11", "P4_12", "P4_13", "P4_14", "P4_15",
+ "P5_0", "P5_1", "P5_2", "P5_3", "P5_4", "P5_5", "P5_6", "P5_7",
+ "P5_8", "P5_9", "P5_10", "P5_11", "P5_12", "P5_13", "P5_14", "P5_15",
+ "P6_0", "P6_1", "P6_2", "P6_3", "P6_4", "P6_5", "P6_6", "P6_7",
+ "P6_8", "P6_9", "P6_10", "P6_11", "P6_12", "P6_13", "P6_14", "P6_15",
+ "P7_0", "P7_1", "P7_2", "P7_3", "P7_4", "P7_5", "P7_6", "P7_7",
+ "P7_8", "P7_9", "P7_10", "P7_11", "P7_12", "P7_13", "P7_14", "P7_15",
+ "P8_0", "P8_1", "P8_2", "P8_3", "P8_4", "P8_5", "P8_6", "P8_7",
+ "P8_8", "P8_9", "P8_10", "P8_11", "P8_12", "P8_13", "P8_14", "P8_15",
+ "P9_0", "P9_1", "P9_2", "P9_3", "P9_4", "P9_5", "P9_6", "P9_7",
+ "P9_8", "P9_9", "P9_10", "P9_11", "P9_12", "P9_13", "P9_14", "P9_15",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P10_8", "P10_9", "P10_10", "P10_11", "P10_12", "P10_13", "P10_14", "P10_15",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P11_8", "P11_9", "P11_10", "P11_11", "P11_12", "P11_13", "P11_14", "P11_15",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P12_8", "P12_9", "P12_10", "P12_11", "P12_12", "P12_13", "P12_14", "P12_15",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P13_8", "P13_9", "P13_10", "P13_11", "P13_12", "P13_13", "P13_14", "P13_15",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P14_8", "P14_9", "P14_10", "P14_11", "P14_12", "P14_13", "P14_14", "P14_15",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P15_8", "P15_9", "P15_10", "P15_11", "P15_12", "P15_13", "P15_14", "P15_15",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P16_8", "P16_9", "P16_10", "P16_11", "P16_12", "P16_13", "P16_14", "P16_15",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P17_8", "P17_9", "P17_10", "P17_11", "P17_12", "P17_13", "P17_14", "P17_15",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P18_8", "P18_9", "P18_10", "P18_11", "P18_12", "P18_13", "P18_14", "P18_15",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P19_8", "P19_9", "P19_10", "P19_11", "P19_12", "P19_13", "P19_14", "P19_15",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P20_8", "P20_9", "P20_10", "P20_11", "P20_12", "P20_13", "P20_14", "P20_15",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P21_8", "P21_9", "P21_10", "P21_11", "P21_12", "P21_13", "P21_14", "P21_15",
+};
+
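+/*
+ * RZV2M_GPIO_PORT_PACK(pincnt, index, cfgs) records how many of a
+ * port's 16 pin slots are actually wired up; rzv2m_validate_gpio_pin()
+ * rejects requests for the unused slots (ports 18 and 19 have none).
+ */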
+static const u32 rzv2m_gpio_configs[] = {
+ RZV2M_GPIO_PORT_PACK(14, 0, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 1, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 2, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 3, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 4, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 5, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 6, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(6, 7, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 8, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 9, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 10, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 11, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 12, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 13, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 14, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 15, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(14, 16, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(1, 17, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(0, 18, 0),
+ RZV2M_GPIO_PORT_PACK(0, 19, 0),
+ RZV2M_GPIO_PORT_PACK(3, 20, PIN_CFG_GRP_1_8V_2 | PIN_CFG_DRV),
+ RZV2M_GPIO_PORT_PACK(1, 21, PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW),
+};
+
+static const struct rzv2m_dedicated_configs rzv2m_dedicated_pins[] = {
+ { "NAWPN", RZV2M_SINGLE_PIN_PACK(0,
+ (PIN_CFG_GRP_SWIO_2 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM0CLK", RZV2M_SINGLE_PIN_PACK(1,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM1CLK", RZV2M_SINGLE_PIN_PACK(2,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETDO", RZV2M_SINGLE_PIN_PACK(5,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETMS", RZV2M_SINGLE_PIN_PACK(6,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "PCRSTOUTB", RZV2M_SINGLE_PIN_PACK(12,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "USPWEN", RZV2M_SINGLE_PIN_PACK(14,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+};
+
+static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct device_node *np = pctrl->dev->of_node;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ const char *name = dev_name(pctrl->dev);
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
+ if (ret) {
+ dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
+ return ret;
+ }
+
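+ /*
+ * The range must start at pin 0 / GPIO 0 and cover every port pin;
+ * for the r9a09g011 data below that is 22 ports x 16 pins = 352,
+ * i.e. "gpio-ranges = <&pinctrl 0 0 352>".
+ */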
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != pctrl->data->n_port_pins) {
+ dev_err(pctrl->dev, "gpio-ranges does not match the selected SoC\n");
+ return -EINVAL;
+ }
+
+ chip->names = pctrl->data->port_pins;
+ chip->request = rzv2m_gpio_request;
+ chip->free = rzv2m_gpio_free;
+ chip->get_direction = rzv2m_gpio_get_direction;
+ chip->direction_input = rzv2m_gpio_direction_input;
+ chip->direction_output = rzv2m_gpio_direction_output;
+ chip->get = rzv2m_gpio_get;
+ chip->set = rzv2m_gpio_set;
+ chip->label = name;
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = -1;
+ chip->ngpio = of_args.args[2];
+
+ pctrl->gpio_range.id = 0;
+ pctrl->gpio_range.pin_base = 0;
+ pctrl->gpio_range.base = 0;
+ pctrl->gpio_range.npins = chip->ngpio;
+ pctrl->gpio_range.name = chip->label;
+ pctrl->gpio_range.gc = chip;
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO controller\n");
+ return ret;
+ }
+
+ dev_dbg(pctrl->dev, "Registered gpio controller\n");
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int i, j;
+ u32 *pin_data;
+ int ret;
+
+ pctrl->desc.name = DRV_NAME;
+ pctrl->desc.npins = pctrl->data->n_port_pins + pctrl->data->n_dedicated_pins;
+ pctrl->desc.pctlops = &rzv2m_pinctrl_pctlops;
+ pctrl->desc.pmxops = &rzv2m_pinctrl_pmxops;
+ pctrl->desc.confops = &rzv2m_pinctrl_confops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(pctrl->dev, pctrl->desc.npins,
+ sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ pctrl->desc.pins = pins;
+
+ for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) {
+ pins[i].number = i;
+ pins[i].name = pctrl->data->port_pins[i];
+ if (i && !(i % RZV2M_PINS_PER_PORT))
+ j++;
+ pin_data[i] = pctrl->data->port_pin_configs[j];
+ pins[i].drv_data = &pin_data[i];
+ }
+
+ for (i = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ unsigned int index = pctrl->data->n_port_pins + i;
+
+ pins[index].number = index;
+ pins[index].name = pctrl->data->dedicated_pins[i].name;
+ pin_data[index] = pctrl->data->dedicated_pins[i].config;
+ pins[index].drv_data = &pin_data[index];
+ }
+
+ ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
+ &pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ ret = rzv2m_gpio_register(pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzv2m_pinctrl_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int rzv2m_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rzv2m_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+
+ pctrl->data = of_device_get_match_data(&pdev->dev);
+ if (!pctrl->data)
+ return -EINVAL;
+
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ pctrl->clk = devm_clk_get(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ ret = PTR_ERR(pctrl->clk);
+ dev_err(pctrl->dev, "failed to get GPIO clk: %i\n", ret);
+ return ret;
+ }
+
+ spin_lock_init(&pctrl->lock);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to enable GPIO clk: %i\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2m_pinctrl_clk_disable,
+ pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev,
+ "failed to register GPIO clk disable action, %i\n",
+ ret);
+ return ret;
+ }
+
+ ret = rzv2m_pinctrl_register(pctrl);
+ if (ret)
+ return ret;
+
+ dev_info(pctrl->dev, "%s support registered\n", DRV_NAME);
+ return 0;
+}
+
+static struct rzv2m_pinctrl_data r9a09g011_data = {
+ .port_pins = rzv2m_gpio_names,
+ .port_pin_configs = rzv2m_gpio_configs,
+ .dedicated_pins = rzv2m_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(rzv2m_gpio_configs) * RZV2M_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2m_dedicated_pins),
+};
+
+static const struct of_device_id rzv2m_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a09g011-pinctrl",
+ .data = &r9a09g011_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzv2m_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzv2m_pinctrl_of_table),
+ },
+ .probe = rzv2m_pinctrl_probe,
+};
+
+static int __init rzv2m_pinctrl_init(void)
+{
+ return platform_driver_register(&rzv2m_pinctrl_driver);
+}
+core_initcall(rzv2m_pinctrl_init);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/V2M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 12bc279f5733..0fcb29ab0c84 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -325,6 +325,7 @@ extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779f0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779g0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
@@ -492,9 +493,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
-#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_13(bank, fn, sfx, cfg) \
PORT_GP_CFG_12(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 12, fn, sfx, cfg)
+#define PORT_GP_13(bank, fn, sfx) PORT_GP_CFG_13(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_13(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 13, fn, sfx, cfg)
#define PORT_GP_14(bank, fn, sfx) PORT_GP_CFG_14(bank, fn, sfx, 0)
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 6d7ca1758292..a8212fc126bf 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -27,8 +27,6 @@
#include <linux/soc/samsung/exynos-pmu.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -173,7 +171,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_EINT << shift;
+ con |= EXYNOS_PIN_CON_FUNC_EINT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -196,7 +194,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_INPUT << shift;
+ con |= PIN_CON_FUNC_INPUT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index bfad1ced8017..7bd6d82c9f36 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -16,6 +16,9 @@
#ifndef __PINCTRL_SAMSUNG_EXYNOS_H
#define __PINCTRL_SAMSUNG_EXYNOS_H
+/* Values for the pin CON register */
+#define EXYNOS_PIN_CON_FUNC_EINT 0xf
+
/* External GPIO and wakeup interrupt related definitions */
#define EXYNOS_GPIO_ECON_OFFSET 0x700
#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 26d309d2516d..4837bceb767b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -26,8 +26,6 @@
#include <linux/of_device.h>
#include <linux/spinlock.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "../core.h"
#include "pinctrl-samsung.h"
@@ -614,7 +612,7 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
data = readl(reg);
data &= ~(mask << shift);
if (!input)
- data |= EXYNOS_PIN_FUNC_OUTPUT << shift;
+ data |= PIN_CON_FUNC_OUTPUT << shift;
writel(data, reg);
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index fc6f5199c548..9af93e3d8d9f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -53,6 +53,14 @@ enum pincfg_type {
#define PINCFG_UNPACK_TYPE(cfg) ((cfg) & PINCFG_TYPE_MASK)
#define PINCFG_UNPACK_VALUE(cfg) (((cfg) & PINCFG_VALUE_MASK) >> \
PINCFG_VALUE_SHIFT)
+/*
+ * Values for the pin CON register, choosing the pin function.
+ * The basic set (input and output) is the same across S3C24xx, S3C64xx,
+ * S5PV210, Exynos ARMv7, Exynos ARMv8 and Tesla FSD.
+ */
+#define PIN_CON_FUNC_INPUT 0x0
+#define PIN_CON_FUNC_OUTPUT 0x1
+
/**
* enum eint_type - possible external interrupt types.
* @EINT_TYPE_NONE: bank does not support external interrupts
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 33751a6a0757..a78fdbbdfc0c 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -29,7 +29,6 @@ config PINCTRL_SUN6I_A31
config PINCTRL_SUN6I_A31_R
bool "Support for the Allwinner A31 R-PIO"
default MACH_SUN6I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23
@@ -55,7 +54,6 @@ config PINCTRL_SUN8I_A83T_R
config PINCTRL_SUN8I_A23_R
bool "Support for the Allwinner A23 and A33 R-PIO"
default MACH_SUN8I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3
@@ -81,7 +79,11 @@ config PINCTRL_SUN9I_A80
config PINCTRL_SUN9I_A80_R
bool "Support for the Allwinner A80 R-PIO"
default MACH_SUN9I
- depends on RESET_CONTROLLER
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN20I_D1
+ bool "Support for the Allwinner D1 PIO"
+ default MACH_SUN8I || (RISCV && ARCH_SUNXI)
select PINCTRL_SUNXI
config PINCTRL_SUN50I_A64
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index d3440c42b9d6..2ff5a55927ad 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_SUN8I_A83T_R) += pinctrl-sun8i-a83t-r.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
+obj-$(CONFIG_PINCTRL_SUN20I_D1) += pinctrl-sun20i-d1.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
new file mode 100644
index 000000000000..40858b881298
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner D1 SoC pinctrl driver.
+ *
+ * Copyright (c) 2020 wuyan@allwinnertech.com
+ * Copyright (c) 2021-2022 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
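+/*
+ * Each SUNXI_FUNCTION() entry maps a mux register value to a function
+ * name: 0x0/0x1 select GPIO input/output, and 0xe (via
+ * SUNXI_FUNCTION_IRQ_BANK()) routes the pin to its bank's external
+ * interrupt.
+ */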
+static const struct sunxi_desc_pin d1_pins[] = {
+ /* PB */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "ir"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x8, "spdif"), /* OUT */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm4"),
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x8, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x8, "bist0"), /* BIST_RESULT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x8, "bist1"), /* BIST_RESULT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x3, "pwm6"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x3, "pwm7"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x3, "pwm2"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x3, "pwm0"),
+ SUNXI_FUNCTION(0x4, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x7, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 12)),
+ /* PC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x6, "pll"), /* DBG-CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 7)),
+ /* PD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0P */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1P */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKP */
+ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKN */
+ SUNXI_FUNCTION(0x5, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2P */
+ SUNXI_FUNCTION(0x5, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2N */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3P */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3N */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x5, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 18)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x5, "pwm3"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 19)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 20)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 21)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x3, "ir"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 22)),
+ /* PE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCTL/CRS_DV */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x4, "pwm2"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDC */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x4, "pwm3"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDIO */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x4, "pwm4"),
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x3, "ncsi0"), /* FIELD */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "ir"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 17)),
+ /* PF */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x4, "ir"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 6)),
+ /* PG */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCTRL/CRS_DV */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD0 */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD1 */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCK */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD0 */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD1 */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD3 */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD2 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD3 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "pwm0"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* CLKIN/RXER */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION(0x6, "ledc"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDC */
+ SUNXI_FUNCTION(0x5, "i2s1_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDIO */
+ SUNXI_FUNCTION(0x5, "i2s1_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir"), /* RX */
+ SUNXI_FUNCTION(0x3, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION(0x4, "pwm5"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x7, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x7, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 18)),
+};
+
+static const unsigned int d1_irq_bank_map[] = { 1, 2, 3, 4, 5, 6 };
+
+static const struct sunxi_pinctrl_desc d1_pinctrl_data = {
+ .pins = d1_pins,
+ .npins = ARRAY_SIZE(d1_pins),
+ .irq_banks = ARRAY_SIZE(d1_irq_bank_map),
+ .irq_bank_map = d1_irq_bank_map,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
+};
+
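The irq_bank_map translates the driver's zero-based IRQ bank index into the hardware PIO bank number, since the D1's interrupt-capable banks start at PB rather than PA. For illustration only, a minimal userspace sketch of that translation (the bank letters are inferred from the pin tables above, where PB..PG carry the EINT functions):

#include <stdio.h>

static const unsigned int d1_irq_bank_map[] = { 1, 2, 3, 4, 5, 6 };

int main(void)
{
	unsigned int i;

	for (i = 0; i < 6; i++)
		printf("IRQ bank %u -> PIO bank P%c\n", i,
		       'A' + (int)d1_irq_bank_map[i]);
	return 0;	/* prints PB, PC, PD, PE, PF, PG */
}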
+static int d1_pinctrl_probe(struct platform_device *pdev)
+{
+ unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &d1_pinctrl_data, variant);
+}
+
+static const struct of_device_id d1_pinctrl_match[] = {
+ {
+ .compatible = "allwinner,sun20i-d1-pinctrl",
+ .data = (void *)PINCTRL_SUN20I_D1
+ },
+ {}
+};
+
+static struct platform_driver d1_pinctrl_driver = {
+ .probe = d1_pinctrl_probe,
+ .driver = {
+ .name = "sun20i-d1-pinctrl",
+ .of_match_table = d1_pinctrl_match,
+ },
+};
+builtin_platform_driver(d1_pinctrl_driver);
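A note on the variant plumbing: the match data above is PINCTRL_SUN20I_D1 (BIT(11)), which probe hands to sunxi_pinctrl_init_with_variant(); the common code (see the pinctrl-sunxi.c hunk below) selects the new register layout with a ">=" comparison, so later, higher-valued variants inherit the D1 layout by default. A standalone sketch of that decision, with the layout summaries as illustrative strings:

#include <stdio.h>

#define PINCTRL_SUN20I_D1 (1UL << 11)	/* BIT(11), as in pinctrl-sunxi.h */

int main(void)
{
	unsigned long variant = PINCTRL_SUN20I_D1; /* from of_device_id.data */

	if (variant >= PINCTRL_SUN20I_D1)
		puts("D1 layout: 0x30 bank stride, pull regs at +0x24");
	else
		puts("classic layout: 0x24 bank stride, pull regs at +0x1c");
	return 0;
}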
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index 21054fcacd34..b82ad135bf2a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -82,6 +82,7 @@ static const struct sunxi_pinctrl_desc a100_r_pinctrl_data = {
.npins = ARRAY_SIZE(a100_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_r_pinctrl_probe(struct platform_device *pdev)
@@ -98,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
static struct platform_driver a100_r_pinctrl_driver = {
.probe = a100_r_pinctrl_probe,
.driver = {
- .name = "sun50iw10p1-r-pinctrl",
+ .name = "sun50i-a100-r-pinctrl",
.of_match_table = a100_r_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index e69f6da40dc0..f682e0e4244d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -684,7 +684,7 @@ static const struct sunxi_pinctrl_desc a100_pinctrl_data = {
.npins = ARRAY_SIZE(a100_pins),
.irq_banks = 7,
.irq_bank_map = a100_irq_bank_map,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
index e69c8dae121a..ef261eccda56 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
@@ -24,7 +24,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index c7d90c44e87a..3aba0aec3d78 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -107,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun50i_h6_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
index 8e4f10ab96ce..c39ea46046c2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index 152b71226a80..d6ca720ee8d8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -525,7 +525,7 @@ static const struct sunxi_pinctrl_desc h616_pinctrl_data = {
.irq_banks = ARRAY_SIZE(h616_irq_bank_map),
.irq_bank_map = h616_irq_bank_map,
.irq_read_needs_mux = true,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int h616_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index a00246d3dd49..2486cdf345e1 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -111,26 +110,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- dev_err(&pdev->dev, "Reset controller missing\n");
- return PTR_ERR(rstc);
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun6i_a31_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun6i_a31_r_pinctrl_data);
}
static const struct of_device_id sun6i_a31_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 9e5b61449999..4fae12c905b7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -98,29 +97,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret == -EPROBE_DEFER)
- return ret;
- dev_err(&pdev->dev, "Reset controller missing err=%d\n", ret);
- return ret;
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun8i_a23_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun8i_a23_r_pinctrl_data);
}
static const struct of_device_id sun8i_a23_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
index 6531cf67958e..0cb6c1a970c9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
@@ -27,7 +27,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index a191a65217ac..f11cb5bba0f7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dd928402af99..6c04027d0dd9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -46,6 +46,67 @@ static struct lock_class_key sunxi_pinctrl_irq_request_class;
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
+/*
+ * The sunXi PIO registers are organized as a series of banks, with registers
+ * for each bank in the following order:
+ * - Mux config
+ * - Data value
+ * - Drive level
+ * - Pull direction
+ *
+ * Multiple consecutive registers are used for fields wider than one bit.
+ *
+ * The following functions calculate the register and the bit offset to access.
+ * They take a pin number which is relative to the start of the current device.
+ */
+static void sunxi_mux_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + MUX_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(MUX_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_data_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * DATA_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + DATA_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(DATA_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_dlevel_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * pctl->dlevel_field_width;
+
+ *reg = bank * pctl->bank_mem_size + DLEVEL_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(pctl->dlevel_field_width) - 1) << *shift;
+}
+
+static void sunxi_pull_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * PULL_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + pctl->pull_regs_offset +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(PULL_FIELD_WIDTH) - 1) << *shift;
+}
+
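To make the arithmetic concrete, a standalone sketch (not part of the patch) of the mux lookup for pin PB5, i.e. pin number 37 when bank A starts at pin 0, using the classic layout constants:

#include <stdio.h>

#define PINS_PER_BANK	32
#define BANK_MEM_SIZE	0x24	/* classic layout; the D1 uses 0x30 */
#define MUX_REGS_OFFSET	0x0
#define MUX_FIELD_WIDTH	4

int main(void)
{
	unsigned int pin = 37;	/* PB5 */
	unsigned int bank = pin / PINS_PER_BANK;
	unsigned int off = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;
	unsigned int reg = bank * BANK_MEM_SIZE + MUX_REGS_OFFSET +
			   off / 32 * 4;
	unsigned int shift = off % 32;
	unsigned int mask = ((1u << MUX_FIELD_WIDTH) - 1) << shift;

	/* prints: reg=0x24 shift=20 mask=0x00f00000 */
	printf("reg=0x%02x shift=%u mask=0x%08x\n", reg, shift, mask);
	return 0;
}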
static struct sunxi_pinctrl_group *
sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
{
@@ -451,22 +512,19 @@ static const struct pinctrl_ops sunxi_pctrl_ops = {
.get_group_pins = sunxi_pctrl_get_group_pins,
};
-static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param,
- u32 *offset, u32 *shift, u32 *mask)
+static int sunxi_pconf_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, enum pin_config_param param,
+ u32 *reg, u32 *shift, u32 *mask)
{
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH:
- *offset = sunxi_dlevel_reg(pin);
- *shift = sunxi_dlevel_offset(pin);
- *mask = DLEVEL_PINS_MASK;
+ sunxi_dlevel_reg(pctl, pin, reg, shift, mask);
break;
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_DISABLE:
- *offset = sunxi_pull_reg(pin);
- *shift = sunxi_pull_offset(pin);
- *mask = PULL_PINS_MASK;
+ sunxi_pull_reg(pctl, pin, reg, shift, mask);
break;
default:
@@ -481,17 +539,17 @@ static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- u32 offset, shift, mask, val;
+ u32 reg, shift, mask, val;
u16 arg;
int ret;
pin -= pctl->desc->pin_base;
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
- val = (readl(pctl->membase + offset) >> shift) & mask;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
switch (pinconf_to_config_param(*config)) {
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -547,16 +605,15 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
pin -= pctl->desc->pin_base;
for (i = 0; i < num_configs; i++) {
+ u32 arg, reg, shift, mask, val;
enum pin_config_param param;
unsigned long flags;
- u32 offset, shift, mask, reg;
- u32 arg, val;
int ret;
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
@@ -593,9 +650,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
}
raw_spin_lock_irqsave(&pctl->lock, flags);
- reg = readl(pctl->membase + offset);
- reg &= ~(mask << shift);
- writel(reg | val << shift, pctl->membase + offset);
+ writel((readl(pctl->membase + reg) & ~mask) | val << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
} /* for each config */
@@ -624,7 +680,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
unsigned pin,
struct regulator *supply)
{
- unsigned short bank = pin / PINS_PER_BANK;
+ unsigned short bank;
unsigned long flags;
u32 val, reg;
int uV;
@@ -640,6 +696,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
if (uV == 0)
return 0;
+ pin -= pctl->desc->pin_base;
+ bank = pin / PINS_PER_BANK;
+
switch (pctl->desc->io_bias_cfg_variant) {
case BIAS_VOLTAGE_GRP_CONFIG:
/*
@@ -657,12 +716,20 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
else
val = 0xD; /* 3.3V */
- pin -= pctl->desc->pin_base;
-
reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
reg &= ~IO_BIAS_MASK;
writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
return 0;
+ case BIAS_VOLTAGE_PIO_POW_MODE_CTL:
+ val = uV > 1800000 && uV <= 2500000 ? BIT(bank) : 0;
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ reg = readl(pctl->membase + PIO_POW_MOD_CTL_REG);
+ reg &= ~BIT(bank);
+ writel(reg | val, pctl->membase + PIO_POW_MOD_CTL_REG);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+
+ fallthrough;
case BIAS_VOLTAGE_PIO_POW_MODE_SEL:
val = uV <= 1800000 ? 1 : 0;
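In other words, for the new MODE_CTL variant the per-bank withstand bit is set only in the 1.8 V to 2.5 V window, after which the code falls through and programs the 1.8 V/3.3 V select bit as before. A small sketch of the resulting bit values for a few supply voltages (the cases are read directly from the two comparisons above):

#include <stdio.h>

int main(void)
{
	int uvs[] = { 1800000, 2500000, 3300000 };
	int i;

	for (i = 0; i < 3; i++) {
		int uV = uvs[i];
		int ctl = uV > 1800000 && uV <= 2500000; /* MOD_CTL bank bit */
		int sel = uV <= 1800000;                 /* MOD_SEL bank bit */

		printf("%d uV: CTL=%d SEL=%d\n", uV, ctl, sel);
	}
	return 0;
}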
@@ -710,16 +777,16 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ u32 reg, shift, mask;
unsigned long flags;
- u32 val, mask;
+
+ pin -= pctl->desc->pin_base;
+ sunxi_mux_reg(pctl, pin, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- pin -= pctl->desc->pin_base;
- val = readl(pctl->membase + sunxi_mux_reg(pin));
- mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
- writel((val & ~mask) | config << sunxi_mux_offset(pin),
- pctl->membase + sunxi_mux_reg(pin));
+ writel((readl(pctl->membase + reg) & ~mask) | config << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -852,43 +919,43 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
bool set_mux = pctl->desc->irq_read_needs_mux &&
gpiochip_line_is_irq(chip, offset);
u32 pin = offset + chip->base;
- u32 val;
+ u32 reg, shift, mask, val;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_INPUT);
- val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_IRQ);
- return !!val;
+ return val;
}
static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
+ u32 reg, shift, mask, val;
unsigned long flags;
- u32 regval;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- regval = readl(pctl->membase + reg);
+ val = readl(pctl->membase + reg);
if (value)
- regval |= BIT(index);
+ val |= mask;
else
- regval &= ~(BIT(index));
+ val &= ~mask;
- writel(regval, pctl->membase + reg);
+ writel(val, pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
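Since DATA_FIELD_WIDTH is 1, the mask computed by sunxi_data_reg() degenerates to BIT(pin % 32), so the rewritten get/set paths are behaviourally identical to the old BIT(index) code. A quick standalone check of that equivalence:

#include <assert.h>
#include <stdio.h>

#define PINS_PER_BANK		32
#define DATA_FIELD_WIDTH	1

int main(void)
{
	unsigned int pin;

	for (pin = 0; pin < 64; pin++) {
		unsigned int off = pin % PINS_PER_BANK * DATA_FIELD_WIDTH;
		unsigned int mask = ((1u << DATA_FIELD_WIDTH) - 1) << (off % 32);

		assert(mask == 1u << pin % 32); /* same bit as BIT(index) */
	}
	puts("data-register masks match the old BIT(index) values");
	return 0;
}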
@@ -1232,11 +1299,11 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
/*
* Find an upper bound for the maximum number of functions: in
- * the worst case we have gpio_in, gpio_out, irq and up to four
+ * the worst case we have gpio_in, gpio_out, irq and up to seven
* special functions per pin, plus one entry for the sentinel.
* We'll reallocate that later anyway.
*/
- pctl->functions = kcalloc(4 * pctl->ngroups + 4,
+ pctl->functions = kcalloc(7 * pctl->ngroups + 4,
sizeof(*pctl->functions),
GFP_KERNEL);
if (!pctl->functions)
@@ -1429,6 +1496,15 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
pctl->variant = variant;
+ if (pctl->variant >= PINCTRL_SUN20I_D1) {
+ pctl->bank_mem_size = D1_BANK_MEM_SIZE;
+ pctl->pull_regs_offset = D1_PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = D1_DLEVEL_FIELD_WIDTH;
+ } else {
+ pctl->bank_mem_size = BANK_MEM_SIZE;
+ pctl->pull_regs_offset = PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = DLEVEL_FIELD_WIDTH;
+ }
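For the D1 layout this changes the drive-level arithmetic as well: with 4-bit fields only eight pins fit per register. A standalone sketch (illustration only) for pin PB8, i.e. pin number 40 assuming bank A starts at pin 0:

#include <stdio.h>

#define PINS_PER_BANK		32
#define D1_BANK_MEM_SIZE	0x30
#define DLEVEL_REGS_OFFSET	0x14
#define D1_DLEVEL_FIELD_WIDTH	4

int main(void)
{
	unsigned int pin = 40;	/* PB8 */
	unsigned int bank = pin / PINS_PER_BANK;
	unsigned int off = pin % PINS_PER_BANK * D1_DLEVEL_FIELD_WIDTH;
	unsigned int reg = bank * D1_BANK_MEM_SIZE + DLEVEL_REGS_OFFSET +
			   off / 32 * 4;

	/* prints: dlevel reg=0x48 shift=0 */
	printf("dlevel reg=0x%02x shift=%u\n", reg, off % 32);
	return 0;
}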
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a32bb5bcb754..a87a2f944d60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -36,23 +36,19 @@
#define BANK_MEM_SIZE 0x24
#define MUX_REGS_OFFSET 0x0
+#define MUX_FIELD_WIDTH 4
#define DATA_REGS_OFFSET 0x10
+#define DATA_FIELD_WIDTH 1
#define DLEVEL_REGS_OFFSET 0x14
+#define DLEVEL_FIELD_WIDTH 2
#define PULL_REGS_OFFSET 0x1c
+#define PULL_FIELD_WIDTH 2
+
+#define D1_BANK_MEM_SIZE 0x30
+#define D1_DLEVEL_FIELD_WIDTH 4
+#define D1_PULL_REGS_OFFSET 0x24
#define PINS_PER_BANK 32
-#define MUX_PINS_PER_REG 8
-#define MUX_PINS_BITS 4
-#define MUX_PINS_MASK 0x0f
-#define DATA_PINS_PER_REG 32
-#define DATA_PINS_BITS 1
-#define DATA_PINS_MASK 0x01
-#define DLEVEL_PINS_PER_REG 16
-#define DLEVEL_PINS_BITS 2
-#define DLEVEL_PINS_MASK 0x03
-#define PULL_PINS_PER_REG 16
-#define PULL_PINS_BITS 2
-#define PULL_PINS_MASK 0x03
#define IRQ_PER_BANK 32
@@ -96,8 +92,11 @@
#define PINCTRL_SUN8I_R40 BIT(8)
#define PINCTRL_SUN8I_V3 BIT(9)
#define PINCTRL_SUN8I_V3S BIT(10)
+/* Variants below here have an updated register layout. */
+#define PINCTRL_SUN20I_D1 BIT(11)
#define PIO_POW_MOD_SEL_REG 0x340
+#define PIO_POW_MOD_CTL_REG 0x344
enum sunxi_desc_bias_voltage {
BIAS_VOLTAGE_NONE,
@@ -111,6 +110,12 @@ enum sunxi_desc_bias_voltage {
* register, as seen on H6 SoC, for example.
*/
BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ /*
+ * Bias voltage is set through PIO_POW_MOD_SEL_REG
+	 * and PIO_POW_MOD_CTL_REG registers, as seen on
+	 * the A100 and D1 SoCs, for example.
+ */
+ BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
struct sunxi_desc_function {
@@ -170,6 +175,9 @@ struct sunxi_pinctrl {
raw_spinlock_t lock;
struct pinctrl_dev *pctl_dev;
unsigned long variant;
+ u32 bank_mem_size;
+ u32 pull_regs_offset;
+ u32 dlevel_field_width;
};
#define SUNXI_PIN(_pin, ...) \
@@ -215,83 +223,6 @@ struct sunxi_pinctrl {
.irqnum = _irq, \
}
-/*
- * The sunXi PIO registers are organized as is:
- * 0x00 - 0x0c Muxing values.
- * 8 pins per register, each pin having a 4bits value
- * 0x10 Pin values
- * 32 bits per register, each pin corresponding to one bit
- * 0x14 - 0x18 Drive level
- * 16 pins per register, each pin having a 2bits value
- * 0x1c - 0x20 Pull-Up values
- * 16 pins per register, each pin having a 2bits value
- *
- * This is for the first bank. Each bank will have the same layout,
- * with an offset being a multiple of 0x24.
- *
- * The following functions calculate from the pin number the register
- * and the bit offset that we should access.
- */
-static inline u32 sunxi_mux_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += MUX_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_mux_offset(u16 pin)
-{
- u32 pin_num = pin % MUX_PINS_PER_REG;
- return pin_num * MUX_PINS_BITS;
-}
-
-static inline u32 sunxi_data_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DATA_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_data_offset(u16 pin)
-{
- u32 pin_num = pin % DATA_PINS_PER_REG;
- return pin_num * DATA_PINS_BITS;
-}
-
-static inline u32 sunxi_dlevel_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DLEVEL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_dlevel_offset(u16 pin)
-{
- u32 pin_num = pin % DLEVEL_PINS_PER_REG;
- return pin_num * DLEVEL_PINS_BITS;
-}
-
-static inline u32 sunxi_pull_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += PULL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_pull_offset(u16 pin)
-{
- u32 pin_num = pin % PULL_PINS_PER_REG;
- return pin_num * PULL_PINS_BITS;
-}
-
static inline u32 sunxi_irq_hw_bank_num(const struct sunxi_pinctrl_desc *desc, u8 bank)
{
if (!desc->irq_bank_map)
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 38800e86ed8a..1ae3c56b66b0 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -959,6 +959,8 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
goto error;
}
+ vq->num_max = vring->num;
+
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index 55834ccb4ac7..1e071df4c9f5 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -460,8 +460,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
u32 regval;
int err;
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
if (err)
goto regmap_read_fail;
@@ -474,7 +472,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -491,8 +488,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
 * line card which has already been enabled. Disabling does not affect the disabled line
* card.
*/
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
if (err)
goto regmap_read_fail;
@@ -505,7 +500,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -538,6 +532,15 @@ mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
static void
mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
{
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+}
+
+static void
+mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
mutex_lock(&mlxreg_lc->lock);
if (action)
@@ -560,8 +563,11 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
- if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
+ mutex_lock(&mlxreg_lc->lock);
+ if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED)) {
+ mutex_unlock(&mlxreg_lc->lock);
return 0;
+ }
switch (kind) {
case MLXREG_HOTPLUG_LC_SYNCED:
@@ -574,7 +580,7 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
- return err;
+ goto mlxreg_lc_power_on_off_fail;
}
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
@@ -588,12 +594,13 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ mutex_unlock(&mlxreg_lc->lock);
return err;
}
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
if (err)
- return err;
+ goto mlxreg_lc_create_static_devices_fail;
/* In case line card is already in ready state - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
@@ -620,6 +627,10 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
break;
}
+mlxreg_lc_power_on_off_fail:
+mlxreg_lc_create_static_devices_fail:
+ mutex_unlock(&mlxreg_lc->lock);
+
return err;
}
@@ -665,7 +676,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
if (err)
goto mlxreg_lc_create_static_devices_failed;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
}
/* Verify if line card is synchronized. */
@@ -676,7 +687,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
/* Power on line card if necessary. */
if (regval & mlxreg_lc->data->mask) {
mlxreg_lc->state |= MLXREG_LC_SYNCED;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
@@ -684,7 +695,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
}
}
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
return 0;
@@ -814,10 +825,9 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
mutex_init(&mlxreg_lc->lock);
/* Set event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = mlxreg_lc_event_handler;
- data->notifier->handle = mlxreg_lc;
- }
+ data->notifier->user_handler = mlxreg_lc_event_handler;
+ data->notifier->handle = mlxreg_lc;
+
data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
@@ -863,7 +873,6 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
- err = PTR_ERR(regmap);
goto regcache_sync_fail;
}
@@ -878,16 +887,14 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err)
goto mlxreg_lc_config_init_fail;
- return err;
+ return 0;
mlxreg_lc_config_init_fail:
regcache_sync_fail:
regmap_write_fail:
devm_regmap_init_i2c_fail:
- if (data->hpdev.client) {
- i2c_unregister_device(data->hpdev.client);
- data->hpdev.client = NULL;
- }
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
@@ -905,6 +912,8 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
+
/*
* Probing and removing are invoked by hotplug events raised upon line card insertion and
* removing. If probing procedure fails all data is cleared. However, hotplug event still
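The net effect of this refactor is that the regmap helpers now run with the caller holding the lock across the whole read-modify-write sequence, while a *_locked wrapper remains for entry points that are not already serialized. A minimal userspace sketch of the pattern, with shortened hypothetical names:

#include <pthread.h>

struct lc {
	pthread_mutex_t lock;
	unsigned int state;
};

/* Caller must hold lc->lock across the whole state transaction. */
static void state_update(struct lc *lc, unsigned int bit, int set)
{
	if (set)
		lc->state |= bit;
	else
		lc->state &= ~bit;
}

/* Wrapper for entry points that enter without the lock held. */
static void state_update_locked(struct lc *lc, unsigned int bit, int set)
{
	pthread_mutex_lock(&lc->lock);
	state_update(lc, bit, set);
	pthread_mutex_unlock(&lc->lock);
}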
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d5655f6a4a41..585911020cea 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -86,38 +86,38 @@ static const struct software_node ssam_node_bas_dtx = {
.parent = &ssam_node_root,
};
-/* HID keyboard (TID1). */
-static const struct software_node ssam_node_hid_tid1_keyboard = {
+/* HID keyboard (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_keyboard = {
.name = "ssam:01:15:01:01:00",
.parent = &ssam_node_root,
};
-/* HID pen stash (TID1; pen taken / stashed away evens). */
-static const struct software_node ssam_node_hid_tid1_penstash = {
+/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_sam_penstash = {
.name = "ssam:01:15:01:02:00",
.parent = &ssam_node_root,
};
-/* HID touchpad (TID1). */
-static const struct software_node ssam_node_hid_tid1_touchpad = {
+/* HID touchpad (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_touchpad = {
.name = "ssam:01:15:01:03:00",
.parent = &ssam_node_root,
};
-/* HID device instance 6 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid6 = {
+/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
+static const struct software_node ssam_node_hid_sam_sensors = {
.name = "ssam:01:15:01:06:00",
.parent = &ssam_node_root,
};
-/* HID device instance 7 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid7 = {
+/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
+static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
.name = "ssam:01:15:01:07:00",
.parent = &ssam_node_root,
};
-/* HID system controls (TID1). */
-static const struct software_node ssam_node_hid_tid1_sysctrl = {
+/* HID system controls (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_sysctrl = {
.name = "ssam:01:15:01:08:00",
.parent = &ssam_node_root,
};
@@ -182,8 +182,8 @@ static const struct software_node ssam_node_hid_kip_touchpad = {
.parent = &ssam_node_hub_kip,
};
-/* HID device instance 5 (KIP hub, unknown HID device). */
-static const struct software_node ssam_node_hid_kip_iid5 = {
+/* HID device instance 5 (KIP hub, type-cover firmware update). */
+static const struct software_node ssam_node_hid_kip_fwupd = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_hub_kip,
};
@@ -241,12 +241,12 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_pos_tablet_switch,
- &ssam_node_hid_tid1_keyboard,
- &ssam_node_hid_tid1_penstash,
- &ssam_node_hid_tid1_touchpad,
- &ssam_node_hid_tid1_iid6,
- &ssam_node_hid_tid1_iid7,
- &ssam_node_hid_tid1_sysctrl,
+ &ssam_node_hid_sam_keyboard,
+ &ssam_node_hid_sam_penstash,
+ &ssam_node_hid_sam_touchpad,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ &ssam_node_hid_sam_sysctrl,
NULL,
};
@@ -278,7 +278,9 @@ static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
&ssam_node_hid_kip_touchpad,
- &ssam_node_hid_kip_iid5,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
NULL,
};
@@ -325,6 +327,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
/* Surface Laptop Studio */
{ "MSHW0123", (unsigned long)ssam_node_group_sls },
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e0230ea0cb7e..f1259d81d86d 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
{KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
{KE_IGNORE, 0x41, {KEY_MUTE} },
{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ /*
+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+ * with the "Video Bus" input device events. But sometimes it is not
+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 89b604e04d7f..eec7d0ed7cf2 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -107,7 +107,7 @@ module_param(fnlock_default, bool, 0444);
#define WMI_EVENT_MASK 0xFFFF
#define FAN_CURVE_POINTS 8
-#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2)
+#define FAN_CURVE_BUF_LEN 32
#define FAN_CURVE_DEV_CPU 0x00
#define FAN_CURVE_DEV_GPU 0x01
/* Mask to determine if setting temperature or percentage */
@@ -1118,7 +1118,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
- asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
@@ -2233,8 +2233,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
curves = &asus->custom_fan_curves[fan_idx];
err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
FAN_CURVE_BUF_LEN);
- if (err)
+ if (err) {
+ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
return err;
+ }
fan_curve_copy_from_buf(curves, buf);
curves->device_id = fan_dev;
@@ -2252,9 +2254,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
- fan_dev, err);
- /* Don't cause probe to fail on devices without fan-curves */
return 0;
}
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index fb2e141f3eb8..384d0962ae93 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -42,10 +42,24 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
+/* Copy resource from the first BAR of the device in question */
static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- /* Copy resource from the first BAR of the device in question */
- *mem = pdev->resource[0];
+ struct resource *bar0 = &pdev->resource[0];
+
+ /* Make sure we have no dangling pointers in the output */
+ memset(mem, 0, sizeof(*mem));
+
+ /*
+ * We copy only selected fields from the original resource.
+ * Because a PCI device will be removed soon, we may not use
+ * any allocated data, hence we may not copy any pointers.
+ */
+ mem->start = bar0->start;
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+
return 0;
}
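The point of the field-wise copy is that struct resource also carries a name pointer and resource-tree links owned by the temporary PCI device, which would dangle once that device is removed. A simplified sketch of the hazard being avoided (the struct layout here is illustrative, not the kernel's):

#include <string.h>

struct res {
	unsigned long start, end, flags;
	const char *name;	/* owned by the device about to go away */
	struct res *parent;	/* resource-tree link, also owned */
};

/* Copy only the value fields; never the pointers. */
static void copy_safe(struct res *dst, const struct res *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->start = src->start;
	dst->end = src->end;
	dst->flags = src->flags;
}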
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 154317e9910d..5c757c7f64de 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -232,7 +232,7 @@ static void pmc_power_off(void)
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
- pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
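This is the classic read-modify-write fix: the old code AND-ed with the mask itself, keeping only the sleep-type field and zeroing every other PM1_CNT bit, whereas the intent is to clear the field and preserve the rest. A standalone demonstration (the register value and masks below are placeholders, not the real PM1_CNT layout):

#include <stdio.h>

#define SLEEP_TYPE_MASK	0x1c00u	/* placeholder field mask */
#define SLEEP_TYPE_S5	0x1c00u	/* placeholder field value */
#define SLEEP_ENABLE	0x2000u

int main(void)
{
	unsigned int reg = 0x00ff0401;	/* pretend other bits are in use */
	unsigned int bad = (reg & SLEEP_TYPE_MASK) | SLEEP_TYPE_S5 | SLEEP_ENABLE;
	unsigned int good = (reg & ~SLEEP_TYPE_MASK) | SLEEP_TYPE_S5 | SLEEP_ENABLE;

	printf("bad  = 0x%08x (other control bits lost)\n", bad);
	printf("good = 0x%08x (other control bits kept)\n", good);
	return 0;
}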
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 67feed25c9db..5362f1a7b77c 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -328,6 +328,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 22d4e8633e30..2dbb9fc011a7 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10592,10 +10592,9 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
- /* Set AMT correctly now we know current profile */
- if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
- (dytc_capabilities & BIT(DYTC_FC_AMT)))
- dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+ /* Workaround for https://bugzilla.kernel.org/show_bug.cgi?id=216347 */
+ if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
return 0;
}
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 480375977435..4acd6fa8d43b 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -663,9 +663,23 @@ static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
},
};
+static int __init chuwi_hi8_init(void)
+{
+ /*
+ * Avoid the acpi_unregister_gsi() call in x86_acpi_irq_helper_get()
+ * breaking the touchscreen + logging various errors when the Windows
+ * BIOS is used.
+ */
+ if (acpi_dev_present("MSSL0001", NULL, 1))
+ return -ENODEV;
+
+ return 0;
+}
+
static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_info = chuwi_hi8_i2c_clients,
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
+ .init = chuwi_hi8_init,
};
#define CZC_EC_EXTRA_PORT 0x68
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 4b563db3ab3e..a8c46ba5878f 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -297,4 +297,10 @@ config NVMEM_REBOOT_MODE
then the bootloader can read it and take different
action according to the mode.
+config POWER_MLXBF
+ tristate "Mellanox BlueField power handling driver"
+ depends on (GPIO_MLXBF2 && ACPI)
+ help
+ This driver supports reset or low power mode handling for Mellanox BlueField.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index f606a2f60539..0a39424fc558 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
obj-$(CONFIG_POWER_RESET_SC27XX) += sc27xx-poweroff.o
obj-$(CONFIG_NVMEM_REBOOT_MODE) += nvmem-reboot-mode.o
+obj-$(CONFIG_POWER_MLXBF) += pwr-mlxbf.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 64def79d557a..741e44a017c3 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -17,10 +17,13 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/reset-controller.h>
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
+#include <dt-bindings/reset/sama7g5-reset.h>
+
#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */
#define AT91_RSTC_PERRST BIT(2) /* Peripheral Reset */
@@ -39,6 +42,17 @@
#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
+/**
+ * enum reset_type - reset types
+ * @RESET_TYPE_GENERAL: first power-up reset
+ * @RESET_TYPE_WAKEUP: return from backup mode
+ * @RESET_TYPE_WATCHDOG: watchdog fault
+ * @RESET_TYPE_SOFTWARE: processor reset required by software
+ * @RESET_TYPE_USER: NRST pin detected low
+ * @RESET_TYPE_CPU_FAIL: CPU clock failure detection
+ * @RESET_TYPE_XTAL_FAIL: 32KHz crystal failure detection fault
+ * @RESET_TYPE_ULP2: ULP2 reset
+ */
enum reset_type {
RESET_TYPE_GENERAL = 0,
RESET_TYPE_WAKEUP = 1,
@@ -50,15 +64,48 @@ enum reset_type {
RESET_TYPE_ULP2 = 8,
};
+/**
+ * struct at91_reset - AT91 reset specific data structure
+ * @rstc_base: base address for system reset
+ * @ramc_base: array with base addresses of RAM controllers
+ * @dev_base: base address for devices reset
+ * @sclk: slow clock
+ * @data: platform specific reset data
+ * @rcdev: reset controller device
+ * @lock: lock for devices reset register access
+ * @nb: reset notifier block
+ * @args: SoC specific system reset arguments
+ * @ramc_lpr: SDRAM Controller Low Power Register
+ */
struct at91_reset {
void __iomem *rstc_base;
void __iomem *ramc_base[2];
+ void __iomem *dev_base;
struct clk *sclk;
+ const struct at91_reset_data *data;
+ struct reset_controller_dev rcdev;
+ spinlock_t lock;
struct notifier_block nb;
u32 args;
u32 ramc_lpr;
};
+#define to_at91_reset(r) container_of(r, struct at91_reset, rcdev)
+
+/**
+ * struct at91_reset_data - AT91 reset data
+ * @reset_args: SoC specific system reset arguments
+ * @n_device_reset: number of device resets
+ * @device_reset_min_id: min id for device reset
+ * @device_reset_max_id: max id for device reset
+ */
+struct at91_reset_data {
+ u32 reset_args;
+ u32 n_device_reset;
+ u8 device_reset_min_id;
+ u8 device_reset_max_id;
+};
+
/*
* unless the SDRAM is cleanly shutdown before we hit the
* reset register it can be left driving the data bus and
@@ -95,7 +142,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
"r" (reset->rstc_base),
"r" (1),
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
- "r" (reset->args),
+ "r" (reset->data->reset_args),
"r" (reset->ramc_lpr)
: "r4");
@@ -153,34 +200,133 @@ static const struct of_device_id at91_ramc_of_match[] = {
{ /* sentinel */ }
};
+static const struct at91_reset_data sam9260 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data samx7 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data sama7g5 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+ .n_device_reset = 3,
+ .device_reset_min_id = SAMA7G5_RESET_USB_PHY1,
+ .device_reset_max_id = SAMA7G5_RESET_USB_PHY3,
+};
+
static const struct of_device_id at91_reset_of_match[] = {
{
.compatible = "atmel,at91sam9260-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST),
+ .data = &sam9260,
},
{
.compatible = "atmel,at91sam9g45-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,sama5d3-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,samx7-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
},
{
.compatible = "microchip,sam9x60-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
+ },
+ {
+ .compatible = "microchip,sama7g5-rstc",
+ .data = &sama7g5,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
+static int at91_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&reset->lock, flags);
+ val = readl_relaxed(reset->dev_base);
+ if (assert)
+ val |= BIT(id);
+ else
+ val &= ~BIT(id);
+ writel_relaxed(val, reset->dev_base);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int at91_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, true);
+}
+
+static int at91_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, false);
+}
+
+static int at91_reset_dev_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ u32 val;
+
+ val = readl_relaxed(reset->dev_base);
+
+ return !!(val & BIT(id));
+}
+
+static const struct reset_control_ops at91_reset_ops = {
+ .assert = at91_reset_assert,
+ .deassert = at91_reset_deassert,
+ .status = at91_reset_dev_status,
+};
+
+static int at91_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+
+ if (!reset->data->n_device_reset ||
+ (reset_spec->args[0] < reset->data->device_reset_min_id ||
+ reset_spec->args[0] > reset->data->device_reset_max_id))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+
+static int at91_rcdev_init(struct at91_reset *reset,
+ struct platform_device *pdev)
+{
+ if (!reset->data->n_device_reset)
+ return 0;
+
+ reset->dev_base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 1,
+ NULL);
+ if (IS_ERR(reset->dev_base))
+ return -ENODEV;
+
+ spin_lock_init(&reset->lock);
+ reset->rcdev.ops = &at91_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = reset->data->n_device_reset;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->rcdev.of_xlate = at91_reset_of_xlate;
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
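With the xlate hook above, consumers address the extra device resets by the SoC-specific IDs from dt-bindings/reset/sama7g5-reset.h. A hypothetical consumer sketch (not part of this patch) using the standard kernel reset API:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_phy_init(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	/* "resets = <&rstc SAMA7G5_RESET_USB_PHY1>;" in the consumer node */
	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	return reset_control_deassert(rst);
}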
static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -212,10 +358,12 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
}
- match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+ reset->data = device_get_match_data(&pdev->dev);
+ if (!reset->data)
+ return -ENODEV;
+
reset->nb.notifier_call = at91_reset;
reset->nb.priority = 192;
- reset->args = (u32)match->data;
reset->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(reset->sclk))
@@ -229,6 +377,10 @@ static int __init at91_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, reset);
+ ret = at91_rcdev_init(reset, pdev);
+ if (ret)
+ goto disable_clk;
+
if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) {
u32 val = readl(reset->rstc_base + AT91_RSTC_MR);
@@ -237,14 +389,16 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
ret = register_restart_handler(&reset->nb);
- if (ret) {
- clk_disable_unprepare(reset->sclk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
at91_reset_status(pdev, reset->rstc_base);
return 0;
+
+disable_clk:
+ clk_disable_unprepare(reset->sclk);
+ return ret;
}
static int __exit at91_reset_remove(struct platform_device *pdev)
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
new file mode 100644
index 000000000000..12dedf841a44
--- /dev/null
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/*
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/types.h>
+
+struct pwr_mlxbf {
+ struct work_struct send_work;
+ const char *hid;
+};
+
+static void pwr_mlxbf_send_work(struct work_struct *work)
+{
+ acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
+}
+
+static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
+{
+ const char *rst_pwr_hid = "MLNXBF24";
+ const char *low_pwr_hid = "MLNXBF29";
+ struct pwr_mlxbf *priv = ptr;
+
+ if (!strncmp(priv->hid, rst_pwr_hid, 8))
+ emergency_restart();
+
+ if (!strncmp(priv->hid, low_pwr_hid, 8))
+ schedule_work(&priv->send_work);
+
+ return IRQ_HANDLED;
+}
+
+static int pwr_mlxbf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct pwr_mlxbf *priv;
+ const char *hid;
+ int irq, err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENXIO;
+
+ hid = acpi_device_hid(adev);
+ priv->hid = hid;
+
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
+
+ err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev, irq, pwr_mlxbf_irq, 0, hid, priv);
+ if (err)
+ dev_err(dev, "Failed request of %s irq\n", priv->hid);
+
+ return err;
+}
+
+static const struct acpi_device_id __maybe_unused pwr_mlxbf_acpi_match[] = {
+ { "MLNXBF24", 0 },
+ { "MLNXBF29", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, pwr_mlxbf_acpi_match);
+
+static struct platform_driver pwr_mlxbf_driver = {
+ .driver = {
+ .name = "pwr_mlxbf",
+ .acpi_match_table = pwr_mlxbf_acpi_match,
+ },
+ .probe = pwr_mlxbf_probe,
+};
+
+module_platform_driver(pwr_mlxbf_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField power driver");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/power/supply/ab8500-chargalg.h b/drivers/power/supply/ab8500-chargalg.h
index f47a0061c36a..8534d067ba95 100644
--- a/drivers/power/supply/ab8500-chargalg.h
+++ b/drivers/power/supply/ab8500-chargalg.h
@@ -34,7 +34,6 @@ struct ux500_charger_ops {
* @max_out_volt_uv maximum output charger voltage in uV
* @max_out_curr_ua maximum output charger current in uA
* @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
*/
struct ux500_charger {
struct power_supply *psy;
@@ -43,9 +42,6 @@ struct ux500_charger {
int max_out_curr_ua;
int wdt_refresh;
bool enabled;
- bool external;
};
-extern struct blocking_notifier_head charger_notifier_list;
-
#endif /* _AB8500_CHARGALG_H_ */
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b7e842dff567..863fabe05bdc 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -697,7 +697,6 @@ static void ab8500_btemp_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->btemp_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_btemp_component_ops = {
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 431bbc352d1b..ae4be553f424 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -246,9 +246,6 @@ struct ab8500_chargalg {
struct kobject chargalg_kobject;
};
-/*External charger prepare notifier*/
-BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
-
/* Main battery properties */
static enum power_supply_property ab8500_chargalg_props[] = {
POWER_SUPPLY_PROP_STATUS,
@@ -343,8 +340,7 @@ static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
return di->usb_chg->ops.check_enable(di->usb_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
- } else if ((di->chg_info.charger_type & AC_CHG) &&
- !(di->ac_chg->external)) {
+ } else if (di->chg_info.charger_type & AC_CHG) {
return di->ac_chg->ops.check_enable(di->ac_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
@@ -473,15 +469,6 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
/* Check if charger exists and kick watchdog if charging */
if (di->ac_chg && di->ac_chg->ops.kick_wd &&
di->chg_info.online_chg & AC_CHG) {
- /*
- * If AB charger watchdog expired, pm2xxx charging
- * gets disabled. To be safe, kick both AB charger watchdog
- * and pm2xxx watchdog.
- */
- if (di->ac_chg->external &&
- di->usb_chg && di->usb_chg->ops.kick_wd)
- di->usb_chg->ops.kick_wd(di->usb_chg);
-
return di->ac_chg->ops.kick_wd(di->ac_chg);
} else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
di->chg_info.online_chg & USB_CHG)
@@ -517,14 +504,6 @@ static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
di->chg_info.ac_iset_ua = iset_ua;
di->chg_info.ac_vset_uv = vset_uv;
- /* Enable external charger */
- if (enable && di->ac_chg->external &&
- !ab8500_chargalg_ex_ac_enable_toggle) {
- blocking_notifier_call_chain(&charger_notifier_list,
- 0, di->dev);
- ab8500_chargalg_ex_ac_enable_toggle++;
- }
-
return di->ac_chg->ops.enable(di->ac_chg, enable, vset_uv, iset_ua);
}
@@ -1217,6 +1196,34 @@ static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
}
/**
+ * ab8500_chargalg_time_to_restart() - time to restart CC/CV charging?
+ * @di: charging algorithm state
+ *
+ * This checks if the voltage or capacity of the battery has fallen so
+ * low that we need to restart the CC/CV charge cycle.
+ */
+static bool ab8500_chargalg_time_to_restart(struct ab8500_chargalg *di)
+{
+ struct power_supply_battery_info *bi = di->bm->bi;
+
+ /* Sanity check - these need to have some reasonable values */
+ if (!di->batt_data.volt_uv || !di->batt_data.percent)
+ return false;
+
+ /* Some batteries tell us at which voltage we should restart charging */
+ if (bi->charge_restart_voltage_uv > 0) {
+ if (di->batt_data.volt_uv <= bi->charge_restart_voltage_uv)
+ return true;
+ /* Else we restart as we reach a certain capacity */
+ } else {
+ if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ab8500_chargalg_algorithm() - Main function for the algorithm
* @di: pointer to the ab8500_chargalg structure
*
@@ -1459,7 +1466,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
fallthrough;
case STATE_WAIT_FOR_RECHARGE:
- if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ if (ab8500_chargalg_time_to_restart(di))
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
break;
@@ -1486,6 +1493,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state A - battery getting old?\n");
+ }
break;
case STATE_MAINTENANCE_B_INIT:
@@ -1510,6 +1525,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state B - battery getting old?\n");
+ }
break;
case STATE_TEMP_LOWHIGH_INIT:
@@ -1746,7 +1769,6 @@ static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->chargalg_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_chargalg_component_ops = {
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index d04d087caa50..c19c50442761 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1716,29 +1716,6 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return ret;
}
-static int ab8500_external_charger_prepare(struct notifier_block *charger_nb,
- unsigned long event, void *data)
-{
- int ret;
- struct device *dev = data;
- /*Toggle External charger control pin*/
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_DISABLE_REG_VAL);
- if (ret < 0) {
- dev_err(dev, "write reg failed %d\n", ret);
- goto out;
- }
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_ENABLE_REG_VAL);
- if (ret < 0)
- dev_err(dev, "Write reg failed %d\n", ret);
-
-out:
- return ret;
-}
-
/**
* ab8500_charger_usb_check_enable() - enable usb charging
* @charger: pointer to the ux500_charger structure
@@ -3316,10 +3293,6 @@ static int __maybe_unused ab8500_charger_suspend(struct device *dev)
return 0;
}
-static struct notifier_block charger_nb = {
- .notifier_call = ab8500_external_charger_prepare,
-};
-
static char *supply_interface[] = {
"ab8500_chargalg",
"ab8500_fg",
@@ -3378,6 +3351,7 @@ static int ab8500_charger_bind(struct device *dev)
ret = component_bind_all(dev, di);
if (ret) {
dev_err(dev, "can't bind component devices\n");
+ destroy_workqueue(di->charger_wq);
return ret;
}
@@ -3404,8 +3378,6 @@ static void ab8500_charger_unbind(struct device *dev)
/* Delete the work queue */
destroy_workqueue(di->charger_wq);
- flush_scheduled_work();
-
/* Unbind fg, btemp, algorithm */
component_unbind_all(dev, di);
}
@@ -3540,7 +3512,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
*/
if (!is_ab8505(di->parent))
di->ac_chg.enabled = true;
- di->ac_chg.external = false;
/* USB supply */
/* ux500_charger sub-class */
@@ -3553,7 +3524,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.max_out_curr_ua =
ab8500_charge_output_curr_map[ARRAY_SIZE(ab8500_charge_output_curr_map) - 1];
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
- di->usb_chg.external = false;
di->usb_state.usb_current_ua = -1;
mutex_init(&di->charger_attached_mutex);
@@ -3677,17 +3647,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto remove_ab8500_bm;
}
- /* Notifier for external charger enabling */
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_register(
- &charger_notifier_list, &charger_nb);
-
-
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(di->usb_phy)) {
dev_err(dev, "failed to get usb transceiver\n");
ret = -EINVAL;
- goto out_charger_notifier;
+ goto remove_ab8500_bm;
}
di->nb.notifier_call = ab8500_charger_usb_notifier_call;
ret = usb_register_notifier(di->usb_phy, &di->nb);
@@ -3696,7 +3660,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto put_usb_phy;
}
-
ret = component_master_add_with_match(&pdev->dev,
&ab8500_charger_comp_ops,
match);
@@ -3711,10 +3674,6 @@ free_notifier:
usb_unregister_notifier(di->usb_phy, &di->nb);
put_usb_phy:
usb_put_phy(di->usb_phy);
-out_charger_notifier:
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
remove_ab8500_bm:
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
return ret;
@@ -3729,9 +3688,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
usb_unregister_notifier(di->usb_phy, &di->nb);
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
usb_put_phy(di->usb_phy);
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
return 0;
}
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 4339fa9ff009..c6c9804280db 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -412,7 +412,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
* ab8500_fg_clear_cap_samples() - Clear average filter
* @di: pointer to the ab8500_fg structure
*
- * The capacity filter is is reset to zero.
+ * The capacity filter is reset to zero.
*/
static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
{
@@ -3234,7 +3234,6 @@ static int ab8500_fg_remove(struct platform_device *pdev)
struct ab8500_fg *di = platform_get_drvdata(pdev);
destroy_workqueue(di->fg_wq);
- flush_scheduled_work();
component_del(&pdev->dev, &ab8500_fg_component_ops);
list_del(&di->node);
ab8500_fg_sysfs_exit(di);
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 96cb3290bcaa..ecba9ab86faf 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -287,7 +287,7 @@ static int bq24257_set_input_current_limit(struct bq24257_device *bq,
{
/*
* Address the case where the user manually sets an input current limit
- * while the charger auto-detection mechanism is is active. In this
+ * while the charger auto-detection mechanism is active. In this
* case we want to abort and go straight to the user-specified value.
*/
if (bq->iilimit_autoset_enable)
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
index 9fe6d826148d..1379afd9698d 100644
--- a/drivers/power/supply/cros_peripheral_charger.c
+++ b/drivers/power/supply/cros_peripheral_charger.c
@@ -63,7 +63,7 @@ static int cros_pchg_ec_command(const struct charger_data *charger,
struct cros_ec_command *msg;
int ret;
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
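The struct_size() conversion above is the standard fix for open-coded `sizeof(*msg) + len` allocations of structures with trailing flexible arrays: the helper from <linux/overflow.h> saturates to SIZE_MAX on overflow, so the allocation fails cleanly instead of silently wrapping to a short buffer. A minimal sketch with a hypothetical message structure:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_msg {
	u32 version;
	u8 data[];	/* flexible array member */
};

static struct my_msg *my_msg_alloc(size_t payload)
{
	struct my_msg *msg;

	/*
	 * struct_size() saturates to SIZE_MAX if the size calculation
	 * would overflow, so kzalloc() fails instead of handing back a
	 * buffer smaller than sizeof(*msg) + payload.
	 */
	msg = kzalloc(struct_size(msg, data, payload), GFP_KERNEL);
	return msg;
}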
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index bf1754355c9f..a58d713d75ce 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -221,10 +221,8 @@ static int goldfish_battery_probe(struct platform_device *pdev)
}
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
ret = devm_request_irq(&pdev->dev, data->irq,
goldfish_battery_interrupt,
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 397e5a03b7d9..56c57529c228 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -376,7 +376,7 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
return 0;
}
- /* settting charging parameters */
+ /* setting charging parameters */
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 8b6c8cfa7503..4fed74511931 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -3,7 +3,7 @@
* max77976_charger.c - Driver for the Maxim MAX77976 battery charger
*
* Copyright (C) 2021 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -504,6 +504,6 @@ static struct i2c_driver max77976_driver = {
};
module_i2c_driver(max77976_driver);
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_DESCRIPTION("Maxim MAX77976 charger driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index e0476ec06601..a5da20ffd685 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -635,6 +635,7 @@ static int olpc_battery_probe(struct platform_device *pdev)
struct power_supply_config bat_psy_cfg = {};
struct power_supply_config ac_psy_cfg = {};
struct olpc_battery_data *data;
+ struct device_node *np;
uint8_t status;
uint8_t ecver;
int ret;
@@ -649,7 +650,9 @@ static int olpc_battery_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ np = of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec");
+ if (np) {
+ of_node_put(np);
/* XO 1.75 */
data->new_proto = true;
data->little_endian = true;
diff --git a/drivers/power/supply/pm2301_charger.h b/drivers/power/supply/pm2301_charger.h
deleted file mode 100644
index 74397e377982..000000000000
--- a/drivers/power/supply/pm2301_charger.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2012
- *
- * PM2301 power supply interface
- */
-
-#ifndef PM2301_CHARGER_H
-#define PM2301_CHARGER_H
-
-/* Watchdog timeout constant */
-#define WD_TIMER 0x30 /* 4min */
-#define WD_KICK_INTERVAL (30 * HZ)
-
-#define PM2XXX_NUM_INT_REG 0x6
-
-/* Constant voltage/current */
-#define PM2XXX_CONST_CURR 0x0
-#define PM2XXX_CONST_VOLT 0x1
-
-/* Lowest charger voltage is 3.39V -> 0x4E */
-#define LOW_VOLT_REG 0x4E
-
-#define PM2XXX_BATT_CTRL_REG1 0x00
-#define PM2XXX_BATT_CTRL_REG2 0x01
-#define PM2XXX_BATT_CTRL_REG3 0x02
-#define PM2XXX_BATT_CTRL_REG4 0x03
-#define PM2XXX_BATT_CTRL_REG5 0x04
-#define PM2XXX_BATT_CTRL_REG6 0x05
-#define PM2XXX_BATT_CTRL_REG7 0x06
-#define PM2XXX_BATT_CTRL_REG8 0x07
-#define PM2XXX_NTC_CTRL_REG1 0x08
-#define PM2XXX_NTC_CTRL_REG2 0x09
-#define PM2XXX_BATT_CTRL_REG9 0x0A
-#define PM2XXX_BATT_STAT_REG1 0x0B
-#define PM2XXX_INP_VOLT_VPWR2 0x11
-#define PM2XXX_INP_DROP_VPWR2 0x13
-#define PM2XXX_INP_VOLT_VPWR1 0x15
-#define PM2XXX_INP_DROP_VPWR1 0x17
-#define PM2XXX_INP_MODE_VPWR 0x18
-#define PM2XXX_BATT_WD_KICK 0x70
-#define PM2XXX_DEV_VER_STAT 0x0C
-#define PM2XXX_THERM_WARN_CTRL_REG 0x20
-#define PM2XXX_BATT_DISC_REG 0x21
-#define PM2XXX_BATT_LOW_LEV_COMP_REG 0x22
-#define PM2XXX_BATT_LOW_LEV_VAL_REG 0x23
-#define PM2XXX_I2C_PAD_CTRL_REG 0x24
-#define PM2XXX_SW_CTRL_REG 0x26
-#define PM2XXX_LED_CTRL_REG 0x28
-
-#define PM2XXX_REG_INT1 0x40
-#define PM2XXX_MASK_REG_INT1 0x50
-#define PM2XXX_SRCE_REG_INT1 0x60
-#define PM2XXX_REG_INT2 0x41
-#define PM2XXX_MASK_REG_INT2 0x51
-#define PM2XXX_SRCE_REG_INT2 0x61
-#define PM2XXX_REG_INT3 0x42
-#define PM2XXX_MASK_REG_INT3 0x52
-#define PM2XXX_SRCE_REG_INT3 0x62
-#define PM2XXX_REG_INT4 0x43
-#define PM2XXX_MASK_REG_INT4 0x53
-#define PM2XXX_SRCE_REG_INT4 0x63
-#define PM2XXX_REG_INT5 0x44
-#define PM2XXX_MASK_REG_INT5 0x54
-#define PM2XXX_SRCE_REG_INT5 0x64
-#define PM2XXX_REG_INT6 0x45
-#define PM2XXX_MASK_REG_INT6 0x55
-#define PM2XXX_SRCE_REG_INT6 0x65
-
-#define VPWR_OVV 0x0
-#define VSYSTEM_OVV 0x1
-
-/* control Reg 1 */
-#define PM2XXX_CH_RESUME_EN 0x1
-#define PM2XXX_CH_RESUME_DIS 0x0
-
-/* control Reg 2 */
-#define PM2XXX_CH_AUTO_RESUME_EN 0X2
-#define PM2XXX_CH_AUTO_RESUME_DIS 0X0
-#define PM2XXX_CHARGER_ENA 0x4
-#define PM2XXX_CHARGER_DIS 0x0
-
-/* control Reg 3 */
-#define PM2XXX_CH_WD_CC_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_CC_PHASE_5MIN 0x1
-#define PM2XXX_CH_WD_CC_PHASE_10MIN 0x2
-#define PM2XXX_CH_WD_CC_PHASE_30MIN 0x3
-#define PM2XXX_CH_WD_CC_PHASE_60MIN 0x4
-#define PM2XXX_CH_WD_CC_PHASE_120MIN 0x5
-#define PM2XXX_CH_WD_CC_PHASE_240MIN 0x6
-#define PM2XXX_CH_WD_CC_PHASE_360MIN 0x7
-
-#define PM2XXX_CH_WD_CV_PHASE_OFF (0x0<<3)
-#define PM2XXX_CH_WD_CV_PHASE_5MIN (0x1<<3)
-#define PM2XXX_CH_WD_CV_PHASE_10MIN (0x2<<3)
-#define PM2XXX_CH_WD_CV_PHASE_30MIN (0x3<<3)
-#define PM2XXX_CH_WD_CV_PHASE_60MIN (0x4<<3)
-#define PM2XXX_CH_WD_CV_PHASE_120MIN (0x5<<3)
-#define PM2XXX_CH_WD_CV_PHASE_240MIN (0x6<<3)
-#define PM2XXX_CH_WD_CV_PHASE_360MIN (0x7<<3)
-
-/* control Reg 4 */
-#define PM2XXX_CH_WD_PRECH_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_PRECH_PHASE_1MIN 0x1
-#define PM2XXX_CH_WD_PRECH_PHASE_5MIN 0x2
-#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
-#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
-#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
-#define PM2XXX_CH_WD_PRECH_PHASE_120MIN 0x6
-#define PM2XXX_CH_WD_PRECH_PHASE_240MIN 0x7
-
-/* control Reg 5 */
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN 0x1
-
-/* control Reg 6 */
-#define PM2XXX_DIR_CH_CC_CURRENT_MASK 0x0F
-#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
-#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
-#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
-#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
-#define PM2XXX_DIR_CH_CC_CURRENT_1000MA 0x5
-#define PM2XXX_DIR_CH_CC_CURRENT_1200MA 0x6
-#define PM2XXX_DIR_CH_CC_CURRENT_1400MA 0x7
-#define PM2XXX_DIR_CH_CC_CURRENT_1600MA 0x8
-#define PM2XXX_DIR_CH_CC_CURRENT_1800MA 0x9
-#define PM2XXX_DIR_CH_CC_CURRENT_2000MA 0xA
-#define PM2XXX_DIR_CH_CC_CURRENT_2200MA 0xB
-#define PM2XXX_DIR_CH_CC_CURRENT_2400MA 0xC
-#define PM2XXX_DIR_CH_CC_CURRENT_2600MA 0xD
-#define PM2XXX_DIR_CH_CC_CURRENT_2800MA 0xE
-#define PM2XXX_DIR_CH_CC_CURRENT_3000MA 0xF
-
-#define PM2XXX_CH_PRECH_CURRENT_MASK 0x30
-#define PM2XXX_CH_PRECH_CURRENT_25MA (0x0<<4)
-#define PM2XXX_CH_PRECH_CURRENT_50MA (0x1<<4)
-#define PM2XXX_CH_PRECH_CURRENT_75MA (0x2<<4)
-#define PM2XXX_CH_PRECH_CURRENT_100MA (0x3<<4)
-
-#define PM2XXX_CH_EOC_CURRENT_MASK 0xC0
-#define PM2XXX_CH_EOC_CURRENT_100MA (0x0<<6)
-#define PM2XXX_CH_EOC_CURRENT_150MA (0x1<<6)
-#define PM2XXX_CH_EOC_CURRENT_300MA (0x2<<6)
-#define PM2XXX_CH_EOC_CURRENT_400MA (0x3<<6)
-
-/* control Reg 7 */
-#define PM2XXX_CH_PRECH_VOL_2_5 0x0
-#define PM2XXX_CH_PRECH_VOL_2_7 0x1
-#define PM2XXX_CH_PRECH_VOL_2_9 0x2
-#define PM2XXX_CH_PRECH_VOL_3_1 0x3
-
-#define PM2XXX_CH_VRESUME_VOL_3_2 (0x0<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_4 (0x1<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_6 (0x2<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_8 (0x3<<2)
-
-/* control Reg 8 */
-#define PM2XXX_CH_VOLT_MASK 0x3F
-#define PM2XXX_CH_VOLT_3_5 0x0
-#define PM2XXX_CH_VOLT_3_5225 0x1
-#define PM2XXX_CH_VOLT_3_6 0x4
-#define PM2XXX_CH_VOLT_3_7 0x8
-#define PM2XXX_CH_VOLT_4_0 0x14
-#define PM2XXX_CH_VOLT_4_175 0x1B
-#define PM2XXX_CH_VOLT_4_2 0x1C
-#define PM2XXX_CH_VOLT_4_275 0x1F
-#define PM2XXX_CH_VOLT_4_3 0x20
-
-/*NTC control register 1*/
-#define PM2XXX_BTEMP_HIGH_TH_45 0x0
-#define PM2XXX_BTEMP_HIGH_TH_50 0x1
-#define PM2XXX_BTEMP_HIGH_TH_55 0x2
-#define PM2XXX_BTEMP_HIGH_TH_60 0x3
-#define PM2XXX_BTEMP_HIGH_TH_65 0x4
-
-#define PM2XXX_BTEMP_LOW_TH_N5 (0x0<<3)
-#define PM2XXX_BTEMP_LOW_TH_0 (0x1<<3)
-#define PM2XXX_BTEMP_LOW_TH_5 (0x2<<3)
-#define PM2XXX_BTEMP_LOW_TH_10 (0x3<<3)
-
-/*NTC control register 2*/
-#define PM2XXX_NTC_BETA_COEFF_3477 0x0
-#define PM2XXX_NTC_BETA_COEFF_3964 0x1
-
-#define PM2XXX_NTC_RES_10K (0x0<<2)
-#define PM2XXX_NTC_RES_47K (0x1<<2)
-#define PM2XXX_NTC_RES_100K (0x2<<2)
-#define PM2XXX_NTC_RES_NO_NTC (0x3<<2)
-
-/* control Reg 9 */
-#define PM2XXX_CH_CC_MODEDROP_EN 1
-#define PM2XXX_CH_CC_MODEDROP_DIS 0
-
-#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA (0x0<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA (0x1<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA (0x2<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT (0x3<<1)
-
-#define PM2XXX_CHARCHING_INFO_DIS (0<<3)
-#define PM2XXX_CHARCHING_INFO_EN (1<<3)
-
-#define PM2XXX_CH_150MV_DROP_300MV (0<<4)
-#define PM2XXX_CH_150MV_DROP_150MV (1<<4)
-
-
-/* charger status register */
-#define PM2XXX_CHG_STATUS_OFF 0x0
-#define PM2XXX_CHG_STATUS_ON 0x1
-#define PM2XXX_CHG_STATUS_FULL 0x2
-#define PM2XXX_CHG_STATUS_ERR 0x3
-#define PM2XXX_CHG_STATUS_WAIT 0x4
-#define PM2XXX_CHG_STATUS_NOBAT 0x5
-
-/* Input charger voltage VPWR2 */
-#define PM2XXX_VPWR2_OVV_6_0 0x0
-#define PM2XXX_VPWR2_OVV_6_3 0x1
-#define PM2XXX_VPWR2_OVV_10 0x2
-#define PM2XXX_VPWR2_OVV_NONE 0x3
-
-/* Input charger drop VPWR2 */
-#define PM2XXX_VPWR2_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR2_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR2_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR2_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR2_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR2_DROP_DIS (0x0<<2)
-
-/* Input charger voltage VPWR1 */
-#define PM2XXX_VPWR1_OVV_6_0 0x0
-#define PM2XXX_VPWR1_OVV_6_3 0x1
-#define PM2XXX_VPWR1_OVV_10 0x2
-#define PM2XXX_VPWR1_OVV_NONE 0x3
-
-/* Input charger drop VPWR1 */
-#define PM2XXX_VPWR1_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR1_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR1_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR1_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR1_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR1_DROP_DIS (0x0<<2)
-
-/* Battery low level comparator control register */
-#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
-#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
-
-/* Battery low level value control register */
-#define PM2XXX_VBAT_LOW_LEVEL_2_3 0x0
-#define PM2XXX_VBAT_LOW_LEVEL_2_4 0x1
-#define PM2XXX_VBAT_LOW_LEVEL_2_5 0x2
-#define PM2XXX_VBAT_LOW_LEVEL_2_6 0x3
-#define PM2XXX_VBAT_LOW_LEVEL_2_7 0x4
-#define PM2XXX_VBAT_LOW_LEVEL_2_8 0x5
-#define PM2XXX_VBAT_LOW_LEVEL_2_9 0x6
-#define PM2XXX_VBAT_LOW_LEVEL_3_0 0x7
-#define PM2XXX_VBAT_LOW_LEVEL_3_1 0x8
-#define PM2XXX_VBAT_LOW_LEVEL_3_2 0x9
-#define PM2XXX_VBAT_LOW_LEVEL_3_3 0xA
-#define PM2XXX_VBAT_LOW_LEVEL_3_4 0xB
-#define PM2XXX_VBAT_LOW_LEVEL_3_5 0xC
-#define PM2XXX_VBAT_LOW_LEVEL_3_6 0xD
-#define PM2XXX_VBAT_LOW_LEVEL_3_7 0xE
-#define PM2XXX_VBAT_LOW_LEVEL_3_8 0xF
-#define PM2XXX_VBAT_LOW_LEVEL_3_9 0x10
-#define PM2XXX_VBAT_LOW_LEVEL_4_0 0x11
-#define PM2XXX_VBAT_LOW_LEVEL_4_1 0x12
-#define PM2XXX_VBAT_LOW_LEVEL_4_2 0x13
-
-/* SW CTRL */
-#define PM2XXX_SWCTRL_HW 0x0
-#define PM2XXX_SWCTRL_SW 0x1
-
-
-/* LED Driver Control */
-#define PM2XXX_LED_CURRENT_MASK 0x0C
-#define PM2XXX_LED_CURRENT_2_5MA (0X0<<2)
-#define PM2XXX_LED_CURRENT_1MA (0X1<<2)
-#define PM2XXX_LED_CURRENT_5MA (0X2<<2)
-#define PM2XXX_LED_CURRENT_10MA (0X3<<2)
-
-#define PM2XXX_LED_SELECT_MASK 0x02
-#define PM2XXX_LED_SELECT_EN (0X0<<1)
-#define PM2XXX_LED_SELECT_DIS (0X1<<1)
-
-#define PM2XXX_ANTI_OVERSHOOT_MASK 0x01
-#define PM2XXX_ANTI_OVERSHOOT_DIS 0X0
-#define PM2XXX_ANTI_OVERSHOOT_EN 0X1
-
-enum pm2xxx_reg_int1 {
- PM2XXX_INT1_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_ITVBATLOWR = 0x04,
- PM2XXX_INT1_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_mask_reg_int1 {
- PM2XXX_INT1_M_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_M_ITVBATLOWR = 0x04,
- PM2XXX_INT1_M_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_source_reg_int1 {
- PM2XXX_INT1_S_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_S_ITVBATLOWR = 0x04,
- PM2XXX_INT1_S_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_reg_int2 {
- PM2XXX_INT2_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_mask_reg_int2 {
- PM2XXX_INT2_M_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_M_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_M_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_M_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_source_reg_int2 {
- PM2XXX_INT2_S_ITVPWR2PLUG = 0x03,
- PM2XXX_INT2_S_ITVPWR1PLUG = 0x0c,
-};
-
-enum pm2xxx_reg_int3 {
- PM2XXX_INT3_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_ITCHCCWD = 0x02,
- PM2XXX_INT3_ITCHCVWD = 0x04,
- PM2XXX_INT3_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_mask_reg_int3 {
- PM2XXX_INT3_M_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_M_ITCHCCWD = 0x02,
- PM2XXX_INT3_M_ITCHCVWD = 0x04,
- PM2XXX_INT3_M_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_source_reg_int3 {
- PM2XXX_INT3_S_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_S_ITCHCCWD = 0x02,
- PM2XXX_INT3_S_ITCHCVWD = 0x04,
- PM2XXX_INT3_S_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_reg_int4 {
- PM2XXX_INT4_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_ITCHARGINGON = 0x10,
- PM2XXX_INT4_ITVRESUME = 0x20,
- PM2XXX_INT4_ITBATTFULL = 0x40,
- PM2XXX_INT4_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_mask_reg_int4 {
- PM2XXX_INT4_M_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_M_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_M_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_M_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_M_ITCHARGINGON = 0x10,
- PM2XXX_INT4_M_ITVRESUME = 0x20,
- PM2XXX_INT4_M_ITBATTFULL = 0x40,
- PM2XXX_INT4_M_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_source_reg_int4 {
- PM2XXX_INT4_S_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_S_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_S_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_S_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_S_ITCHARGINGON = 0x10,
- PM2XXX_INT4_S_ITVRESUME = 0x20,
- PM2XXX_INT4_S_ITBATTFULL = 0x40,
- PM2XXX_INT4_S_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_reg_int5 {
- PM2XXX_INT5_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_mask_reg_int5 {
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_M_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_M_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_M_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_source_reg_int5 {
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_S_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_S_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_S_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_reg_int6 {
- PM2XXX_INT6_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_mask_reg_int6 {
- PM2XXX_INT6_M_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_M_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_M_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_M_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_M_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_M_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_source_reg_int6 {
- PM2XXX_INT6_S_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_S_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_S_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_S_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_S_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_S_ITVPWR1VALIDFALL = 0x20,
-};
-
-struct pm2xxx_charger_info {
- int charger_connected;
- int charger_online;
- int cv_active;
- bool wd_expired;
-};
-
-struct pm2xxx_charger_event_flags {
- bool mainextchnotok;
- bool main_thermal_prot;
- bool ovv;
- bool chgwdexp;
-};
-
-struct pm2xxx_interrupts {
- u8 reg[PM2XXX_NUM_INT_REG];
- int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
-};
-
-struct pm2xxx_config {
- struct i2c_client *pm2xxx_i2c;
- struct i2c_device_id *pm2xxx_id;
-};
-
-struct pm2xxx_irq {
- char *name;
- irqreturn_t (*isr)(int irq, void *data);
-};
-
-struct pm2xxx_charger {
- struct device *dev;
- u8 chip_id;
- bool vddadc_en_ac;
- struct pm2xxx_config config;
- bool ac_conn;
- unsigned int gpio_irq;
- int vbat;
- int old_vbat;
- int failure_case;
- int failure_input_ovv;
- unsigned int lpn_pin;
- struct pm2xxx_interrupts *pm2_int;
- struct regulator *regu;
- struct pm2xxx_bm_data *bat;
- struct mutex lock;
- struct ab8500 *parent;
- struct pm2xxx_charger_info ac;
- struct pm2xxx_charger_platform_data *pdata;
- struct workqueue_struct *charger_wq;
- struct delayed_work check_vbat_work;
- struct work_struct ac_work;
- struct work_struct check_main_thermal_prot_work;
- struct delayed_work check_hw_failure_work;
- struct ux500_charger ac_chg;
- struct power_supply_desc ac_chg_desc;
- struct pm2xxx_charger_event_flags flags;
-};
-
-#endif /* PM2301_CHARGER_H */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 470253c337c7..4b5fb172fa99 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -263,13 +263,13 @@ static int power_supply_check_supplies(struct power_supply *psy)
return 0;
/* All supplies found, allocate char ** array for filling */
- psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(psy->supplied_from),
+ psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(*psy->supplied_from),
GFP_KERNEL);
if (!psy->supplied_from)
return -ENOMEM;
*psy->supplied_from = devm_kcalloc(&psy->dev,
- cnt - 1, sizeof(char *),
+ cnt - 1, sizeof(**psy->supplied_from),
GFP_KERNEL);
if (!*psy->supplied_from)
return -ENOMEM;
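The power_supply_core.c hunk above trades `sizeof(psy->supplied_from)` (the size of the pointer variable) for `sizeof(*psy->supplied_from)` (the size of what it points at). The two only happen to match here because the element is itself a pointer; spelling it `sizeof(*ptr)` keeps the allocation correct if the type ever changes. The general idiom, as an illustrative sketch:

#include <linux/slab.h>

struct limits {
	int volt_uv;
	int curr_ua;
};

static struct limits *limits_alloc(void)
{
	/*
	 * sizeof(*lim) follows the pointed-to type automatically;
	 * sizeof(lim) would allocate only a pointer's worth of memory.
	 */
	struct limits *lim = kzalloc(sizeof(*lim), GFP_KERNEL);

	return lim;
}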
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7150b1d0159e..d3e8dc32832d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2733,13 +2733,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;
lockdep_assert_held_once(&rdev->mutex.base);
regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }
return 0;
}
@@ -4784,10 +4789,10 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- consumers[i].consumer = NULL;
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
"Failed to get supply '%s'",
consumers[i].supply);
+ consumers[i].consumer = NULL;
goto err;
}
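The regulator core fix above is about keeping reference counts balanced on failure: enable_count is bumped optimistically and rolled back if drms_uA_update() fails, so a later disable cannot underflow the count. Reduced to its skeleton, with hypothetical names:

struct consumer {
	unsigned int enable_count;
};

static int apply_load(struct consumer *c)
{
	return 0;	/* stand-in for drms_uA_update() */
}

static int consumer_enable(struct consumer *c)
{
	int ret;

	c->enable_count++;
	ret = apply_load(c);
	if (ret)
		c->enable_count--;	/* undo the bump so counts stay balanced */
	return ret;
}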
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 6b617024a67d..d899d6e98fb8 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -766,7 +766,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ regulator_num * sizeof(struct pfuze_regulator));
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 4a3352821b1d..38383e7de3c1 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -594,16 +594,17 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
node = of_parse_phandle(np, "memory-region", a);
/* Not map vdevbuffer, vdevring region */
- if (!strncmp(node->name, "vdev", strlen("vdev")))
+ if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ of_node_put(node);
continue;
+ }
err = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- of_node_put(node);
-
if (b >= IMX_RPROC_MEM_MAX)
break;
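of_parse_phandle() returns its node with an elevated reference count, and the imx_rproc fix covers the easy-to-miss case: a `continue` is an exit path too. A sketch of the corrected shape:

#include <linux/of.h>
#include <linux/string.h>

static void scan_memory_regions(struct device_node *np)
{
	struct device_node *node;
	int a = 0;

	while ((node = of_parse_phandle(np, "memory-region", a++))) {
		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
			of_node_put(node);	/* skipping still drops the ref */
			continue;
		}

		/* ... resolve and record the region ... */

		of_node_put(node);		/* done with the node either way */
	}
}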
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 54781f553f4e..594a9b43b7ae 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -410,10 +410,9 @@ static int keystone_rproc_probe(struct platform_device *pdev)
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to enable clock, status = %d\n", ret);
- pm_runtime_put_noidle(dev);
goto disable_rpm;
}
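pm_runtime_resume_and_get() is the drop-in replacement used above: unlike pm_runtime_get_sync(), it decrements the usage count itself when the resume fails, so the pm_runtime_put_noidle() call disappears from the error path. A sketch of the resulting shape:

#include <linux/pm_runtime.h>

static int my_power_up(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	/*
	 * On failure pm_runtime_resume_and_get() has already dropped
	 * the usage count, so no pm_runtime_put_noidle() is needed.
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}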
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 47b2a40e1b4a..d421a2ccaa1e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -401,6 +401,14 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
return 0;
}
@@ -943,7 +951,19 @@ static const struct mtk_scp_of_data mt8186_of_data = {
.scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
- .ipi_buf_offset = 0x7bdb0,
+ .ipi_buf_offset = 0x3bdb0,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct mtk_scp_of_data mt8192_of_data = {
@@ -973,6 +993,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
+ { .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{},
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 32a588fefbdc..430fab0266ed 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -243,7 +243,7 @@ static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
* omap_rproc_ack_timer_irq() - acknowledge a timer irq
* @timer: handle to a OMAP rproc timer
*
- * This function is used to clear the irq associated with a watchdog timer. The
+ * This function is used to clear the irq associated with a watchdog timer.
* The function is called by the OMAP remoteproc upon a watchdog event on the
* remote processor to clear the interrupt status of the watchdog timer.
*/
@@ -303,7 +303,7 @@ static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
* @configure: boolean flag used to acquire and configure the timer handle
*
* This function is used primarily to enable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either acquire and start a timer (during device initialization) or
* to just start a timer (during a resume operation).
*
@@ -443,7 +443,7 @@ free_timers:
* @configure: boolean flag used to release the timer handle
*
* This function is used primarily to disable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either stop and release a timer (during device shutdown) or to just
* stop a timer (during a suspend operation).
*
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 1777a01fa84e..128bf9912f2c 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -897,6 +897,7 @@ static const struct of_device_id pru_rproc_match[] = {
{ .compatible = "ti,j721e-pru", .data = &k3_pru_data },
{ .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am625-pru", .data = &k3_pru_data },
{},
};
MODULE_DEVICE_TABLE(of, pru_rproc_match);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 4b91e3c9eafa..020349f8979d 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -50,7 +50,7 @@ struct minidump_region {
};
/**
- * struct minidump_subsystem_toc: Subsystem's SMEM Table of content
+ * struct minidump_subsystem - Subsystem's SMEM Table of content
* @status : Subsystem toc init status
* @enabled : if set to 1, this region would be copied during coredump
* @encryption_status: Encryption status for this subsystem
@@ -68,7 +68,7 @@ struct minidump_subsystem {
};
/**
- * struct minidump_global_toc: Global Table of Content
+ * struct minidump_global_toc - Global Table of Content
* @status : Global Minidump init status
* @md_revision : Minidump revision
* @enabled : Minidump enable status
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 5280ec9b5449..497acfb33f8f 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -112,6 +112,7 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
else
dev_err(q6v5->dev, "watchdog without message\n");
+ q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
@@ -123,6 +124,9 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
size_t len;
char *msg;
+ if (!q6v5->running)
+ return IRQ_HANDLED;
+
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(q6v5->dev, "fatal error received: %s\n", msg);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 2f3b9f54251e..4c9a1b99cd51 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -175,9 +175,8 @@ static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
- ret = pm_runtime_get_sync(pds[i]);
+ ret = pm_runtime_resume_and_get(pds[i]);
if (ret < 0) {
- pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index af217de75e4d..fddb63cffee0 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -932,27 +933,52 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
const char *fw_name)
{
- unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
+ unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
+ struct page **pages;
+ struct page *page;
dma_addr_t phys;
void *metadata;
int mdata_perm;
int xferop_ret;
size_t size;
- void *ptr;
+ void *vaddr;
+ int count;
int ret;
+ int i;
metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
- ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
- if (!ptr) {
+ page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+ if (!page) {
kfree(metadata);
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
return -ENOMEM;
}
- memcpy(ptr, metadata, size);
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto free_dma_attrs;
+ }
+
+ for (i = 0; i < count; i++)
+ pages[i] = nth_page(page, i);
+
+ vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
+ kfree(pages);
+ if (!vaddr) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
+ ret = -EBUSY;
+ goto free_dma_attrs;
+ }
+
+ memcpy(vaddr, metadata, size);
+
+ vunmap(vaddr);
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
@@ -982,7 +1008,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
"mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs:
- dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+ dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
kfree(metadata);
return ret < 0 ? ret : 0;
@@ -1102,6 +1128,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
if (ret)
goto reclaim_mba;
+ if (qproc->has_mba_logs)
+ qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "MBA boot timed out\n");
@@ -1594,11 +1623,19 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
return ret;
}
+static unsigned long q6v5_panic(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+
+ return qcom_q6v5_panic(&qproc->q6v5);
+}
+
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
+ .panic = q6v5_panic,
};
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
@@ -2188,6 +2225,11 @@ static const struct rproc_hexagon_res msm8996_mss = {
"mnoc_axi",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 6ae39c5653b1..6afd0941e552 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -29,6 +30,8 @@
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
+#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
@@ -36,6 +39,7 @@ struct adsp_data {
unsigned int minidump_id;
bool has_aggre2_clk;
bool auto_boot;
+ bool decrypt_shutdown;
char **proxy_pd_names;
@@ -65,6 +69,7 @@ struct qcom_adsp {
unsigned int minidump_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ bool decrypt_shutdown;
const char *info_name;
struct completion start_done;
@@ -87,6 +92,9 @@ static void adsp_minidump(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
+ if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
qcom_minidump(rproc, adsp->minidump_id);
}
@@ -128,6 +136,19 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
}
}
+static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
+{
+ unsigned int retry_num = 50;
+ int ret;
+
+ do {
+ msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
+ ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ } while (ret == -EINVAL && --retry_num);
+
+ return ret;
+}
+
static int adsp_unprepare(struct rproc *rproc)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -185,13 +206,17 @@ static int adsp_start(struct rproc *rproc)
if (ret)
goto disable_xo_clk;
- ret = regulator_enable(adsp->cx_supply);
- if (ret)
- goto disable_aggre2_clk;
+ if (adsp->cx_supply) {
+ ret = regulator_enable(adsp->cx_supply);
+ if (ret)
+ goto disable_aggre2_clk;
+ }
- ret = regulator_enable(adsp->px_supply);
- if (ret)
- goto disable_cx_supply;
+ if (adsp->px_supply) {
+ ret = regulator_enable(adsp->px_supply);
+ if (ret)
+ goto disable_cx_supply;
+ }
ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
if (ret) {
@@ -212,9 +237,11 @@ static int adsp_start(struct rproc *rproc)
return 0;
disable_px_supply:
- regulator_disable(adsp->px_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
disable_cx_supply:
- regulator_disable(adsp->cx_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
@@ -231,8 +258,10 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
{
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
- regulator_disable(adsp->px_supply);
- regulator_disable(adsp->cx_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
@@ -249,6 +278,9 @@ static int adsp_stop(struct rproc *rproc)
dev_err(adsp->dev, "timed out on wait\n");
ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ if (ret && adsp->decrypt_shutdown)
+ ret = adsp_shutdown_poll_decrypt(adsp);
+
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
@@ -268,6 +300,9 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iom
if (offset < 0 || offset + len > adsp->mem_size)
return NULL;
+ if (is_iomem)
+ *is_iomem = true;
+
return adsp->mem_region + offset;
}
@@ -326,14 +361,26 @@ static int adsp_init_clock(struct qcom_adsp *adsp)
static int adsp_init_regulator(struct qcom_adsp *adsp)
{
- adsp->cx_supply = devm_regulator_get(adsp->dev, "cx");
- if (IS_ERR(adsp->cx_supply))
- return PTR_ERR(adsp->cx_supply);
+ adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
+ if (IS_ERR(adsp->cx_supply)) {
+ if (PTR_ERR(adsp->cx_supply) == -ENODEV)
+ adsp->cx_supply = NULL;
+ else
+ return PTR_ERR(adsp->cx_supply);
+ }
- regulator_set_load(adsp->cx_supply, 100000);
+ if (adsp->cx_supply)
+ regulator_set_load(adsp->cx_supply, 100000);
- adsp->px_supply = devm_regulator_get(adsp->dev, "px");
- return PTR_ERR_OR_ZERO(adsp->px_supply);
+ adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
+ if (IS_ERR(adsp->px_supply)) {
+ if (PTR_ERR(adsp->px_supply) == -ENODEV)
+ adsp->px_supply = NULL;
+ else
+ return PTR_ERR(adsp->px_supply);
+ }
+
+ return 0;
}
static int adsp_pds_attach(struct device *dev, struct device **devs,
@@ -459,9 +506,12 @@ static int adsp_probe(struct platform_device *pdev)
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
adsp->info_name = desc->sysmon_name;
+ adsp->decrypt_shutdown = desc->decrypt_shutdown;
platform_set_drvdata(pdev, adsp);
- device_wakeup_enable(adsp->dev);
+ ret = device_init_wakeup(adsp->dev, true);
+ if (ret)
+ goto free_rproc;
ret = adsp_alloc_memory_region(adsp);
if (ret)
@@ -877,6 +927,25 @@ static const struct adsp_data sdx55_mpss_resource = {
.ssctl_id = 0x22,
};
+static const struct adsp_data sm8450_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
@@ -916,7 +985,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
- { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
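The cx/px changes above switch to devm_regulator_get_optional(), which returns ERR_PTR(-ENODEV) when the supply simply is not described for the board; the driver records that as "no such supply" (NULL) and guards every enable/disable with a NULL check. A sketch of the lookup half:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int get_optional_supply(struct device *dev, const char *id,
			       struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, id);

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -ENODEV)
			reg = NULL;	/* board has no such supply: fine */
		else
			return PTR_ERR(reg);	/* real error: fail probe */
	}

	*out = reg;
	return 0;
}

Each regulator_enable()/regulator_disable() call site then tests the pointer first, as adsp_start() and qcom_pas_handover() do above.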
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 9fca81492863..57dde2a69b9d 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -41,6 +41,7 @@ struct qcom_sysmon {
struct completion comp;
struct completion ind_comp;
struct completion shutdown_comp;
+ struct completion ssctl_comp;
struct mutex lock;
bool ssr_ack;
@@ -445,6 +446,8 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
svc->priv = sysmon;
+ complete(&sysmon->ssctl_comp);
+
return 0;
}
@@ -501,6 +504,7 @@ static int sysmon_start(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
+ reinit_completion(&sysmon->ssctl_comp);
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
@@ -508,10 +512,12 @@ static int sysmon_start(struct rproc_subdev *subdev)
mutex_lock(&sysmon_lock);
list_for_each_entry(target, &sysmon_list, node) {
- if (target == sysmon)
+ mutex_lock(&target->state_lock);
+ if (target == sysmon || target->state != SSCTL_SSR_EVENT_AFTER_POWERUP) {
+ mutex_unlock(&target->state_lock);
continue;
+ }
- mutex_lock(&target->state_lock);
event.subsys_name = target->name;
event.ssr_event = target->state;
@@ -545,6 +551,11 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
if (crashed)
return;
+ if (sysmon->ssctl_instance) {
+ if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+ dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+ }
+
if (sysmon->ssctl_version)
sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
@@ -631,6 +642,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
init_completion(&sysmon->comp);
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
+ init_completion(&sysmon->ssctl_comp);
mutex_init(&sysmon->lock);
mutex_init(&sysmon->state_lock);
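The sysmon change is a small completion-based handshake: the QMI new_server callback completes ssctl_comp, start reinitializes it, and stop waits with a timeout so a shutdown request is not sent before the service has come up. The same pattern in isolation, with hypothetical names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct svc_ctx {
	struct completion ready;	/* init_completion() once at setup */
};

static void svc_arrived(struct svc_ctx *ctx)	/* "service up" callback */
{
	complete(&ctx->ready);
}

static void svc_start(struct svc_ctx *ctx)
{
	/* a completion left over from a previous cycle must not count */
	reinit_completion(&ctx->ready);
}

static int svc_wait_ready(struct svc_ctx *ctx)
{
	if (!wait_for_completion_timeout(&ctx->ready, HZ / 2))
		return -ETIMEDOUT;
	return 0;
}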
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 9a223d394087..68f37296b151 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -467,6 +467,7 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
irq_handler_t thread_fn)
{
int ret;
+ int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
@@ -477,14 +478,19 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
return ret;
}
+ irq_number = ret;
+
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
- return ret;
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
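wcnss_request_irq() is expected to hand its caller the IRQ number, but devm_request_threaded_irq() returns 0 on success, so the fix above saves the number before reusing ret. The general shape:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int request_named_irq(struct platform_device *pdev, const char *name,
			     irq_handler_t thread_fn, void *data)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					name, data);
	if (ret)
		return ret;

	return irq;	/* callers want the number, not the 0 from request */
}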
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 02a04ab34a23..e5279ed9a8d7 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -59,6 +59,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
+static struct workqueue_struct *rproc_recovery_wq;
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
@@ -334,7 +335,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
size_t size;
/* actual size of vring (in bytes) */
- size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
@@ -401,7 +402,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return -EINVAL;
}
- rvring->len = vring->num;
+ rvring->num = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
@@ -461,6 +462,7 @@ static void rproc_rvdev_release(struct device *dev)
struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
of_reserved_mem_device_release(dev);
+ dma_release_coherent_memory(dev);
kfree(rvdev);
}
@@ -970,7 +972,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
return 0;
}
- /* Register carveout in in list */
+ /* Register carveout in list */
carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
@@ -2434,7 +2436,7 @@ static void rproc_type_release(struct device *dev)
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
- ida_simple_remove(&rproc_dev_index, rproc->index);
+ ida_free(&rproc_dev_index, rproc->index);
kfree_const(rproc->firmware);
kfree_const(rproc->name);
@@ -2551,9 +2553,9 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
goto put_device;
/* Assign a unique device index and name */
- rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
if (rproc->index < 0) {
- dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
goto put_device;
}
@@ -2762,8 +2764,7 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* Have a worker handle the error; ensure system is not suspended */
- queue_work(system_freezable_wq, &rproc->crash_handler);
+ queue_work(rproc_recovery_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
@@ -2812,6 +2813,13 @@ static void __exit rproc_exit_panic(void)
static int __init remoteproc_init(void)
{
+ rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!rproc_recovery_wq) {
+ pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
+ return -ENOMEM;
+ }
+
rproc_init_sysfs();
rproc_init_debugfs();
rproc_init_cdev();
@@ -2825,9 +2833,13 @@ static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ if (!rproc_recovery_wq)
+ return;
+
rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
+ destroy_workqueue(rproc_recovery_wq);
}
module_exit(remoteproc_exit);
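ida_simple_get()/ida_simple_remove() are deprecated wrappers; when, as in remoteproc_core.c, the full 0..INT_MAX range is wanted anyway, ida_alloc()/ida_free() express the same thing directly. A sketch:

#include <linux/idr.h>

static DEFINE_IDA(my_index_ida);

static int my_get_index(void)
{
	/* returns the smallest free ID >= 0, or a negative errno */
	return ida_alloc(&my_index_ida, GFP_KERNEL);
}

static void my_put_index(int index)
{
	ida_free(&my_index_ida, index);
}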
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 70ab496d0431..0f7706e23eb9 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -87,7 +87,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
- int len, size;
+ int num, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
@@ -104,20 +104,20 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rvring = &rvdev->vring[id];
addr = mem->va;
- len = rvring->len;
+ num = rvring->num;
/* zero vring */
- size = vring_size(len, rvring->align);
+ size = vring_size(num, rvring->align);
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+ id, addr, num, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
+ vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
@@ -125,6 +125,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
return ERR_PTR(-ENOMEM);
}
+ vq->num_max = num;
+
rvring->vq = vq;
vq->priv = rvring;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 4840ad906018..0481926c6975 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1655,6 +1655,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
+ of_node_put(child);
goto fail;
}
@@ -1663,6 +1664,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
+ of_node_put(child);
goto fail;
}
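Like the imx_rproc fix earlier in this series, the k3_r5 hunks plug reference leaks on early loop exits: for_each_available_child_of_node() takes a reference on each child and drops it at the top of the next iteration, so bailing out with goto or return must call of_node_put(child) by hand. In isolation, with a stand-in per-child helper:

#include <linux/of.h>

static int init_one(struct device_node *child)
{
	return 0;	/* stand-in for per-child initialization */
}

static int init_children(struct device_node *np)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(np, child) {
		ret = init_one(child);
		if (ret) {
			of_node_put(child);	/* early exit skips the auto-put */
			return ret;
		}
	}

	return 0;
}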
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 48d94649ea82..806773e88832 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -17,7 +17,7 @@ if RESET_CONTROLLER
config RESET_A10SR
tristate "Altera Arria10 System Resource Reset"
- depends on MFD_ALTERA_A10SR
+ depends on MFD_ALTERA_A10SR || COMPILE_TEST
help
This option enables support for the external reset functions for
peripheral PHYs on the Altera Arria10 System Resource Chip.
@@ -200,8 +200,9 @@ config RESET_SCMI
firmware controlling all the reset signals.
config RESET_SIMPLE
- bool "Simple Reset Controller Driver" if COMPILE_TEST
+ bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,
@@ -265,6 +266,14 @@ config RESET_TI_SYSCON
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_TI_TPS380X
+ tristate "TI TPS380x Reset Driver"
+ select GPIOLIB
+ help
+ This enables the reset driver support for TI TPS380x devices. If
+ you wish to use the reset framework for such devices, say Y here.
+ Otherwise, say N.
+
config RESET_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD reset controller"
depends on MFD_TN48M_CPLD || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 3ff378f43348..cd5cf8e7c6a7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o
obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
diff --git a/drivers/reset/reset-tps380x.c b/drivers/reset/reset-tps380x.c
new file mode 100644
index 000000000000..09d511f069ba
--- /dev/null
+++ b/drivers/reset/reset-tps380x.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TI TPS380x Supply Voltage Supervisor and Reset Controller Driver
+ *
+ * Copyright (C) 2022 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ *
+ * Based on Simple Reset Controller Driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset-controller.h>
+
+struct tps380x_reset {
+ struct reset_controller_dev rcdev;
+ struct gpio_desc *reset_gpio;
+ unsigned int reset_ms;
+};
+
+struct tps380x_reset_devdata {
+ unsigned int min_reset_ms;
+ unsigned int typ_reset_ms;
+ unsigned int max_reset_ms;
+};
+
+static inline
+struct tps380x_reset *to_tps380x_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tps380x_reset, rcdev);
+}
+
+static int
+tps380x_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 1);
+
+ return 0;
+}
+
+static int
+tps380x_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 0);
+ msleep(tps380x->reset_ms);
+
+ return 0;
+}
+
+static const struct reset_control_ops reset_tps380x_ops = {
+ .assert = tps380x_reset_assert,
+ .deassert = tps380x_reset_deassert,
+};
+
+static int tps380x_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* No special handling needed, we have only one reset line per device */
+ return 0;
+}
+
+static int tps380x_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct tps380x_reset_devdata *devdata;
+ struct tps380x_reset *tps380x;
+
+ devdata = device_get_match_data(dev);
+ if (!devdata)
+ return -EINVAL;
+
+ tps380x = devm_kzalloc(dev, sizeof(*tps380x), GFP_KERNEL);
+ if (!tps380x)
+ return -ENOMEM;
+
+ tps380x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps380x->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(tps380x->reset_gpio),
+ "Failed to get GPIO\n");
+
+ tps380x->reset_ms = devdata->max_reset_ms;
+
+ tps380x->rcdev.ops = &reset_tps380x_ops;
+ tps380x->rcdev.owner = THIS_MODULE;
+ tps380x->rcdev.dev = dev;
+ tps380x->rcdev.of_node = dev->of_node;
+ tps380x->rcdev.of_reset_n_cells = 0;
+ tps380x->rcdev.of_xlate = tps380x_reset_of_xlate;
+ tps380x->rcdev.nr_resets = 1;
+
+ return devm_reset_controller_register(dev, &tps380x->rcdev);
+}
+
+static const struct tps380x_reset_devdata tps3801_reset_data = {
+ .min_reset_ms = 120,
+ .typ_reset_ms = 200,
+ .max_reset_ms = 280,
+};
+
+static const struct of_device_id tps380x_reset_dt_ids[] = {
+ { .compatible = "ti,tps3801", .data = &tps3801_reset_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tps380x_reset_dt_ids);
+
+static struct platform_driver tps380x_reset_driver = {
+ .probe = tps380x_reset_probe,
+ .driver = {
+ .name = "tps380x-reset",
+ .of_match_table = tps380x_reset_dt_ids,
+ },
+};
+module_platform_driver(tps380x_reset_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("TI TPS380x Supply Voltage Supervisor and Reset Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 5b4404b8be4c..d1213c33da20 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -234,7 +234,9 @@ static void mtk_register_device_work_function(struct work_struct *register_work)
if (info->registered)
continue;
+ mutex_unlock(&subdev->channels_lock);
ret = mtk_rpmsg_register_device(subdev, &info->info);
+ mutex_lock(&subdev->channels_lock);
if (ret) {
dev_err(&pdev->dev, "Can't create rpmsg_device\n");
continue;
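
This is the classic "drop the lock around a blocking call" shape; it is only safe because, as in this driver, entries are not removed from the list while the worker runs. A hedged sketch (register_one() is a placeholder):

	mutex_lock(&lock);
	list_for_each_entry(info, &pending, list_entry) {
		if (info->registered)
			continue;
		mutex_unlock(&lock);	/* callee may sleep or take other locks */
		ret = register_one(info);
		mutex_lock(&lock);	/* state may have changed meanwhile */
		if (ret)
			continue;
		info->registered = true;
	}
	mutex_unlock(&lock);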
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 07586514991f..115c0a1eddb1 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -98,8 +98,6 @@ struct glink_core_rx_intent {
struct qcom_glink {
struct device *dev;
- const char *name;
-
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
@@ -1546,7 +1544,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
@@ -1674,7 +1672,7 @@ static ssize_t rpmsg_name_show(struct device *dev,
if (ret < 0)
name = dev->of_node->name;
- return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);
@@ -1755,10 +1753,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
if (ret)
dev_err(dev, "failed to add groups\n");
- ret = of_property_read_string(dev->of_node, "label", &glink->name);
- if (ret < 0)
- glink->name = dev->of_node->name;
-
glink->mbox_client.dev = dev;
glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
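
Two API swaps recur throughout this series. strscpy_pad() always NUL-terminates and zero-fills the remainder of the destination (strncpy() guarantees neither), and sysfs_emit() replaces open-coded snprintf() in show() callbacks because it enforces the PAGE_SIZE buffer contract. Minimal sketches; src and some_string are placeholders:

	char name[RPMSG_NAME_SIZE];

	/* copies at most sizeof(name) - 1 bytes, terminates, pads with zeros */
	strscpy_pad(name, src, sizeof(name));

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%s\n", some_string);
	}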
diff --git a/drivers/rpmsg/qcom_glink_ssr.c b/drivers/rpmsg/qcom_glink_ssr.c
index dea929c6045d..776d64446879 100644
--- a/drivers/rpmsg/qcom_glink_ssr.c
+++ b/drivers/rpmsg/qcom_glink_ssr.c
@@ -39,7 +39,7 @@ struct cleanup_done_msg {
__le32 seq_num;
};
-/**
+/*
* G-Link SSR protocol commands
*/
#define GLINK_SSR_DO_CLEANUP 0
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 1957b27c4cf3..1044cf03c542 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -729,11 +729,11 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
}
/**
- * qcom_smd_send - write data to smd channel
+ * __qcom_smd_send - write data to smd channel
* @channel: channel handle
* @data: buffer of data to write
* @len: number of bytes to write
- * @wait: flag to indicate if write has ca wait
+ * @wait: flag to indicate if write can wait
*
* This is a blocking write of len bytes into the channel's tx ring buffer and
signals the remote end. It will sleep until there is enough space available
@@ -1089,7 +1089,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1323,7 +1323,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
spin_unlock_irqrestore(&edge->channels_lock, flags);
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
rpmsg_unregister_device(&edge->dev, &chinfo);
@@ -1383,6 +1383,7 @@ static int qcom_smd_parse_edge(struct device *dev,
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
goto put_node;
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index b6183d4f62a2..4f2189111494 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -120,8 +120,11 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
struct rpmsg_device *rpdev = eptdev->rpdev;
struct device *dev = &eptdev->dev;
- if (eptdev->ept)
+ mutex_lock(&eptdev->ept_lock);
+ if (eptdev->ept) {
+ mutex_unlock(&eptdev->ept_lock);
return -EBUSY;
+ }
get_device(dev);
@@ -137,11 +140,13 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
if (!ept) {
dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
put_device(dev);
+ mutex_unlock(&eptdev->ept_lock);
return -EINVAL;
}
eptdev->ept = ept;
filp->private_data = eptdev;
+ mutex_unlock(&eptdev->ept_lock);
return 0;
}
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 290c1f02da10..d6dde00efdae 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -604,7 +604,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
int ret;
if (driver_override)
- strcpy(rpdev->id.name, driver_override);
+ strscpy_pad(rpdev->id.name, driver_override, RPMSG_NAME_SIZE);
dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
@@ -618,6 +618,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
strlen(driver_override));
if (ret) {
dev_err(dev, "device_set_override failed: %d\n", ret);
+ put_device(dev);
return ret;
}
}
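
The added put_device() follows the driver-core ownership rule: once a device has been initialized for registration, error paths must drop the reference and let the release() callback free it, never free it by hand. The general shape (not this exact function) is:

	ret = device_register(dev);
	if (ret) {
		put_device(dev);	/* release() frees it; kfree() here is a bug */
		return ret;
	}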
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index a22cd4abe7d1..39b646d0d40d 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -41,8 +41,8 @@ struct rpmsg_device_ops {
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
- int (*announce_create)(struct rpmsg_device *ept);
- int (*announce_destroy)(struct rpmsg_device *ept);
+ int (*announce_create)(struct rpmsg_device *rpdev);
+ int (*announce_destroy)(struct rpmsg_device *rpdev);
};
/**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a00f901b5c1d..b8de25118ad0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -383,6 +383,16 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
+config RTC_DRV_NCT3018Y
+ tristate "Nuvoton NCT3018Y"
+ depends on OF
+ help
+ If you say yes here you get support for the Nuvoton NCT3018Y I2C RTC
+ chip.
+
+ This driver can also be built as a module. If so, the module will be
+ called "rtc-nct3018y".
+
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1478,16 +1488,6 @@ config RTC_DRV_SUNPLUS
This driver can also be built as a module. If so, the module
will be called rtc-sunplus.
-config RTC_DRV_VR41XX
- tristate "NEC VR41XX"
- depends on CPU_VR41XX || COMPILE_TEST
- help
- If you say Y here you will get access to the real time clock
- built into your NEC VR41XX CPU.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc-vr41xx.
-
config RTC_DRV_PL030
tristate "ARM AMBA PL030 RTC"
depends on ARM_AMBA
@@ -1929,6 +1929,17 @@ config RTC_DRV_ASPEED
This driver can also be built as a module, if so, the module
will be called "rtc-aspeed".
+config RTC_DRV_TI_K3
+ tristate "TI K3 RTC"
+ depends on ARCH_K3 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments
+ Real Time Clock for the K3 architecture.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-ti-k3".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
@@ -1973,4 +1984,14 @@ config RTC_DRV_MSC313
This driver can also be built as a module, if so, the module
will be called "rtc-msc313".
+config RTC_DRV_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC built-in RTC"
+ depends on SOC_MICROCHIP_POLARFIRE
+ help
+ If you say yes here you will get support for the
+ built-in RTC on the Microchip PolarFire SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-mpfs".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index fb04467b652d..aab22bc63432 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
+obj-$(CONFIG_RTC_DRV_NCT3018Y) += rtc-nct3018y.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -130,6 +131,7 @@ obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
+obj-$(CONFIG_RTC_DRV_POLARFIRE_SOC) += rtc-mpfs.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
@@ -172,11 +174,11 @@ obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
-obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 3c8eec2218df..e48223c00c67 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -36,7 +36,7 @@ static void rtc_device_release(struct device *dev)
cancel_work_sync(&rtc->irqwork);
- ida_simple_remove(&rtc_ida, rtc->id);
+ ida_free(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);
kfree(rtc);
}
@@ -262,7 +262,7 @@ static int rtc_device_get_id(struct device *dev)
}
if (id < 0)
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&rtc_ida, GFP_KERNEL);
return id;
}
@@ -368,7 +368,7 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
rtc = rtc_allocate_device();
if (!rtc) {
- ida_simple_remove(&rtc_ida, id);
+ ida_free(&rtc_ida, id);
return ERR_PTR(-ENOMEM);
}
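
The ida_simple_*() wrappers are deprecated; the mapping to the plain IDA API is mechanical: ida_simple_get(ida, 0, 0, gfp) becomes ida_alloc(ida, gfp), bounded allocations use ida_alloc_range() or ida_alloc_max(), and ida_simple_remove() becomes ida_free(). Sketch:

	static DEFINE_IDA(rtc_ida);

	int id = ida_alloc(&rtc_ida, GFP_KERNEL);	/* smallest free id, >= 0 */
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	/* ... use id ... */
	ida_free(&rtc_ida, id);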
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..4aad9bb99868 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -96,7 +96,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_scheduled_work();
+ flush_work(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
@@ -566,9 +566,3 @@ void __init rtc_dev_init(void)
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
-
-void __exit rtc_dev_exit(void)
-{
- if (rtc_devt)
- unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
-}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 6e3e320dc727..f2b0971d2c65 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -817,8 +817,7 @@ static const struct regmap_config abb5zes3_rtc_regmap_config = {
.val_bits = 8,
};
-static int abb5zes3_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abb5zes3_probe(struct i2c_client *client)
{
struct abb5zes3_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -945,7 +944,7 @@ static struct i2c_driver abb5zes3_driver = {
.pm = &abb5zes3_rtc_pm_ops,
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
- .probe = abb5zes3_probe,
+ .probe_new = abb5zes3_probe,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
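
Most of the remaining RTC hunks are the same mechanical conversion: the legacy i2c probe() took a struct i2c_device_id argument these drivers never used, and probe_new() simply drops it. A driver that still needs per-device data can look it up itself; device_get_match_data() is one portable option (sketch with hypothetical names, assuming the match tables carry .data pointers):

	static int foo_probe(struct i2c_client *client)
	{
		const struct foo_devdata *data;

		data = device_get_match_data(&client->dev);
		if (!data)
			return -EINVAL;
		/* ... */
		return 0;
	}

	static struct i2c_driver foo_driver = {
		.probe_new = foo_probe,	/* single-argument probe */
		/* ... */
	};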
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index e188ab517f1e..2f8deb8c4cd3 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -495,8 +495,7 @@ static void abeoz9_hwmon_register(struct device *dev,
#endif
-static int abeoz9_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abeoz9_probe(struct i2c_client *client)
{
struct abeoz9_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -580,7 +579,7 @@ static struct i2c_driver abeoz9_driver = {
.name = "rtc-ab-eoz9",
.of_match_table = of_match_ptr(abeoz9_dt_match),
},
- .probe = abeoz9_probe,
+ .probe_new = abeoz9_probe,
.id_table = abeoz9_id,
};
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 2235c968842d..e0bbb11d912e 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -249,8 +249,7 @@ static void bq32k_sysfs_unregister(struct device *dev)
device_remove_file(dev, &dev_attr_trickle_charge_bypass);
}
-static int bq32k_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bq32k_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct rtc_device *rtc;
@@ -322,7 +321,7 @@ static struct i2c_driver bq32k_driver = {
.name = "bq32k",
.of_match_table = of_match_ptr(bq32k_of_match),
},
- .probe = bq32k_probe,
+ .probe_new = bq32k_probe,
.remove = bq32k_remove,
.id_table = bq32k_id,
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7c006c2b125f..bdb1df843c78 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1260,9 +1260,6 @@ static void use_acpi_alarm_quirks(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return;
-
if (!is_hpet_enabled())
return;
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..4b10a1b8f370 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -2,7 +2,6 @@
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
-extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
#else
@@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
{
}
-static inline void rtc_dev_exit(void)
-{
-}
-
static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 70626793ca69..887f5193e253 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -375,10 +375,8 @@ static int cros_ec_rtc_remove(struct platform_device *pdev)
ret = blocking_notifier_chain_unregister(
&cros_ec_rtc->cros_ec->event_notifier,
&cros_ec_rtc->notifier);
- if (ret) {
+ if (ret)
dev_err(dev, "failed to unregister notifier\n");
- return ret;
- }
return 0;
}
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 8db5a631bca8..b19de5100b1a 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -467,8 +467,7 @@ static const struct watchdog_ops ds1374_wdt_ops = {
*
*****************************************************************************
*/
-static int ds1374_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1374_probe(struct i2c_client *client)
{
struct ds1374 *ds1374;
int ret;
@@ -575,7 +574,7 @@ static struct i2c_driver ds1374_driver = {
.of_match_table = of_match_ptr(ds1374_of_match),
.pm = &ds1374_pm,
},
- .probe = ds1374_probe,
+ .probe_new = ds1374_probe,
.remove = ds1374_remove,
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 4cd8efbef6cf..a3bb2cd9c881 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -106,8 +106,7 @@ static const struct rtc_class_ops ds1672_rtc_ops = {
.set_time = ds1672_set_time,
};
-static int ds1672_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1672_probe(struct i2c_client *client)
{
int err = 0;
struct rtc_device *rtc;
@@ -150,7 +149,7 @@ static struct i2c_driver ds1672_driver = {
.name = "rtc-ds1672",
.of_match_table = of_match_ptr(ds1672_of_match),
},
- .probe = &ds1672_probe,
+ .probe_new = ds1672_probe,
.id_table = ds1672_id,
};
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 168bc27f1f5a..dd31a60c1fc6 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -566,8 +566,7 @@ static const struct dev_pm_ops ds3232_pm_ops = {
#if IS_ENABLED(CONFIG_I2C)
-static int ds3232_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds3232_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
static const struct regmap_config config = {
@@ -604,7 +603,7 @@ static struct i2c_driver ds3232_driver = {
.of_match_table = of_match_ptr(ds3232_of_match),
.pm = &ds3232_pm_ops,
},
- .probe = ds3232_i2c_probe,
+ .probe_new = ds3232_i2c_probe,
.id_table = ds3232_id,
};
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 9f176bce48ba..53f9f9391a5f 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -111,8 +111,7 @@ static const struct rtc_class_ops em3027_rtc_ops = {
.set_time = em3027_set_time,
};
-static int em3027_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int em3027_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -148,7 +147,7 @@ static struct i2c_driver em3027_driver = {
.name = "rtc-em3027",
.of_match_table = of_match_ptr(em3027_of_match),
},
- .probe = &em3027_probe,
+ .probe_new = em3027_probe,
.id_table = em3027_id,
};
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 677ec2da13d8..f59bb81f23c0 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -340,8 +340,7 @@ static const struct rtc_class_ops fm3130_rtc_ops = {
static struct i2c_driver fm3130_driver;
-static int fm3130_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int fm3130_probe(struct i2c_client *client)
{
struct fm3130 *fm3130;
int err = -ENODEV;
@@ -518,7 +517,7 @@ static struct i2c_driver fm3130_driver = {
.driver = {
.name = "rtc-fm3130",
},
- .probe = fm3130_probe,
+ .probe_new = fm3130_probe,
.id_table = fm3130_id,
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 90e602e99d03..cc710d682121 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -495,8 +495,7 @@ static int hym8563_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
-static int hym8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hym8563_probe(struct i2c_client *client)
{
struct hym8563 *hym8563;
int ret;
@@ -572,7 +571,7 @@ static struct i2c_driver hym8563_driver = {
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
- .probe = hym8563_probe,
+ .probe_new = hym8563_probe,
.id_table = hym8563_id,
};
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 961bd5d1d109..79461ded1a48 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -232,8 +232,7 @@ static const struct rtc_class_ops isl12022_rtc_ops = {
.set_time = isl12022_rtc_set_time,
};
-static int isl12022_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int isl12022_probe(struct i2c_client *client)
{
struct isl12022 *isl12022;
@@ -275,7 +274,7 @@ static struct i2c_driver isl12022_driver = {
.of_match_table = of_match_ptr(isl12022_dt_match),
#endif
},
- .probe = isl12022_probe,
+ .probe_new = isl12022_probe,
.id_table = isl12022_id,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 182dfa605515..f448a525333e 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -880,10 +880,14 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (rc)
return rc;
- if (client->irq > 0)
+ if (client->irq > 0) {
rc = isl1208_setup_irq(client, client->irq);
- if (rc)
- return rc;
+ if (rc)
+ return rc;
+
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, isl1208->rtc->features);
+ }
if (evdet_irq > 0 && evdet_irq != client->irq)
rc = isl1208_setup_irq(client, evdet_irq);
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 4beadfa41644..0a33851cc51f 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -197,8 +197,7 @@ static const struct rtc_class_ops max6900_rtc_ops = {
.set_time = max6900_rtc_set_time,
};
-static int
-max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int max6900_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -225,7 +224,7 @@ static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
- .probe = max6900_probe,
+ .probe_new = max6900_probe,
.id_table = max6900_id,
};
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 522449b25921..f1c09f1db044 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -21,13 +21,13 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
unsigned long flags;
unsigned char seconds;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 100; i++) {
spin_lock_irqsave(&rtc_lock, flags);
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
- * every msec for completion.
+ * every 100 usec for completion.
*
* Store the second value before checking UIP so a long lasting
* NMI which happens to hit after the UIP check cannot make
@@ -37,7 +37,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
@@ -56,7 +56,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
*/
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
new file mode 100644
index 000000000000..f14d1925e0c9
--- /dev/null
+++ b/drivers/rtc/rtc-mpfs.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip MPFS RTC driver
+ *
+ * Copyright (c) 2021-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * & Conor Dooley <conor.dooley@microchip.com>
+ */
+#include "linux/bits.h"
+#include "linux/iopoll.h"
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+
+#define CONTROL_REG 0x00
+#define MODE_REG 0x04
+#define PRESCALER_REG 0x08
+#define ALARM_LOWER_REG 0x0c
+#define ALARM_UPPER_REG 0x10
+#define COMPARE_LOWER_REG 0x14
+#define COMPARE_UPPER_REG 0x18
+#define DATETIME_LOWER_REG 0x20
+#define DATETIME_UPPER_REG 0x24
+
+#define CONTROL_RUNNING_BIT BIT(0)
+#define CONTROL_START_BIT BIT(0)
+#define CONTROL_STOP_BIT BIT(1)
+#define CONTROL_ALARM_ON_BIT BIT(2)
+#define CONTROL_ALARM_OFF_BIT BIT(3)
+#define CONTROL_RESET_BIT BIT(4)
+#define CONTROL_UPLOAD_BIT BIT(5)
+#define CONTROL_DOWNLOAD_BIT BIT(6)
+#define CONTROL_MATCH_BIT BIT(7)
+#define CONTROL_WAKEUP_CLR_BIT BIT(8)
+#define CONTROL_WAKEUP_SET_BIT BIT(9)
+#define CONTROL_UPDATED_BIT BIT(10)
+
+#define MODE_CLOCK_CALENDAR BIT(0)
+#define MODE_WAKE_EN BIT(1)
+#define MODE_WAKE_RESET BIT(2)
+#define MODE_WAKE_CONTINUE BIT(3)
+
+#define MAX_PRESCALER_COUNT GENMASK(25, 0)
+#define DATETIME_UPPER_MASK GENMASK(29, 0)
+#define ALARM_UPPER_MASK GENMASK(10, 0)
+
+#define UPLOAD_TIMEOUT_US 50
+
+struct mpfs_rtc_dev {
+ struct rtc_device *rtc;
+ void __iomem *base;
+};
+
+static void mpfs_rtc_start(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+}
+
+static void mpfs_rtc_clear_irq(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 val = readl(rtcdev->base + CONTROL_REG);
+
+ val &= ~(CONTROL_ALARM_ON_BIT | CONTROL_STOP_BIT);
+ val |= CONTROL_ALARM_OFF_BIT;
+ writel(val, rtcdev->base + CONTROL_REG);
+ /*
+ * Ensure that the posted write to the CONTROL_REG register completed before
+ * returning from this function. Not doing this may result in the interrupt
+ * only being cleared some time after this function returns.
+ */
+ (void)readl(rtcdev->base + CONTROL_REG);
+}
+
+static int mpfs_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u64 time;
+
+ time = readl(rtcdev->base + DATETIME_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + DATETIME_UPPER_REG) & DATETIME_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, tm);
+
+ return 0;
+}
+
+static int mpfs_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl, prog;
+ u64 time;
+ int ret;
+
+ time = rtc_tm_to_time64(tm);
+
+ writel((u32)time, rtcdev->base + DATETIME_LOWER_REG);
+ writel((u32)(time >> 32) & DATETIME_UPPER_MASK, rtcdev->base + DATETIME_UPPER_REG);
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_UPLOAD_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ ret = read_poll_timeout(readl, prog, prog & CONTROL_UPLOAD_BIT, 0, UPLOAD_TIMEOUT_US,
+ false, rtcdev->base + CONTROL_REG);
+ if (ret) {
+ dev_err(dev, "timed out uploading time to rtc");
+ return ret;
+ }
+ mpfs_rtc_start(rtcdev);
+
+ return 0;
+}
+
+static int mpfs_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode = readl(rtcdev->base + MODE_REG);
+ u64 time;
+
+ alrm->enabled = mode & MODE_WAKE_EN;
+
+ time = readl(rtcdev->base + ALARM_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + ALARM_UPPER_REG) & ALARM_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, &alrm->time);
+
+ return 0;
+}
+
+static int mpfs_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode, ctrl;
+ u64 time;
+
+ /* Disable the alarm before updating */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ time = rtc_tm_to_time64(&alrm->time);
+
+ writel((u32)time, rtcdev->base + ALARM_LOWER_REG);
+ writel((u32)(time >> 32) & ALARM_UPPER_MASK, rtcdev->base + ALARM_UPPER_REG);
+
+ /* Bypass compare register in alarm mode */
+ writel(GENMASK(31, 0), rtcdev->base + COMPARE_LOWER_REG);
+ writel(GENMASK(29, 0), rtcdev->base + COMPARE_UPPER_REG);
+
+ /* Configure the RTC to enable the alarm. */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ mode = readl(rtcdev->base + MODE_REG);
+ if (alrm->enabled) {
+ mode = MODE_WAKE_EN | MODE_WAKE_CONTINUE;
+ /* Enable the alarm */
+ ctrl &= ~CONTROL_ALARM_OFF_BIT;
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ }
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+ writel(mode, rtcdev->base + MODE_REG);
+
+ return 0;
+}
+
+static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~(CONTROL_ALARM_ON_BIT | CONTROL_ALARM_OFF_BIT | CONTROL_STOP_BIT);
+
+ if (enabled)
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ else
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ return 0;
+}
+
+static inline struct clk *mpfs_rtc_init_clk(struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, "rtc");
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+ return clk;
+}
+
+static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev)
+{
+ struct mpfs_rtc_dev *rtcdev = dev;
+
+ mpfs_rtc_clear_irq(rtcdev);
+
+ rtc_update_irq(rtcdev->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops mpfs_rtc_ops = {
+ .read_time = mpfs_rtc_readtime,
+ .set_time = mpfs_rtc_settime,
+ .read_alarm = mpfs_rtc_readalarm,
+ .set_alarm = mpfs_rtc_setalarm,
+ .alarm_irq_enable = mpfs_rtc_alarm_irq_enable,
+};
+
+static int mpfs_rtc_probe(struct platform_device *pdev)
+{
+ struct mpfs_rtc_dev *rtcdev;
+ struct clk *clk;
+ u32 prescaler;
+ int wakeup_irq, ret;
+
+ rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL);
+ if (!rtcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtcdev);
+
+ rtcdev->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev->rtc))
+ return PTR_ERR(rtcdev->rtc);
+
+ rtcdev->rtc->ops = &mpfs_rtc_ops;
+
+ /* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */
+ rtcdev->rtc->range_max = GENMASK_ULL(42, 0);
+
+ clk = mpfs_rtc_init_clk(&pdev->dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rtcdev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtcdev->base)) {
+ dev_dbg(&pdev->dev, "invalid ioremap resources\n");
+ return PTR_ERR(rtcdev->base);
+ }
+
+ wakeup_irq = platform_get_irq(pdev, 0);
+ if (wakeup_irq <= 0) {
+ dev_dbg(&pdev->dev, "could not get wakeup irq\n");
+ return wakeup_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, wakeup_irq, mpfs_rtc_wakeup_irq_handler, 0,
+ dev_name(&pdev->dev), rtcdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request wakeup irq\n");
+ return ret;
+ }
+
+ /* prescaler hardware adds 1 to reg value */
+ prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1;
+
+ if (prescaler > MAX_PRESCALER_COUNT) {
+ dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler);
+ return -EINVAL;
+ }
+
+ writel(prescaler, rtcdev->base + PRESCALER_REG);
+ dev_info(&pdev->dev, "prescaler set to: 0x%X\n", prescaler);
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
+
+ return devm_rtc_register_device(rtcdev->rtc);
+}
+
+static int mpfs_rtc_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_rtc_of_match[] = {
+ { .compatible = "microchip,mpfs-rtc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
+
+static struct platform_driver mpfs_rtc_driver = {
+ .probe = mpfs_rtc_probe,
+ .remove = mpfs_rtc_remove,
+ .driver = {
+ .name = "mpfs_rtc",
+ .of_match_table = mpfs_rtc_of_match,
+ },
+};
+
+module_platform_driver(mpfs_rtc_driver);
+
+MODULE_DESCRIPTION("Real time clock for Microchip Polarfire SoC");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
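
On the range_max above: the alarm compare value spans ALARM_LOWER (bits 31:0) plus ALARM_UPPER (bits 10:0), i.e. 32 + 11 = 43 usable bits, hence GENMASK_ULL(42, 0), even though the date/time counter itself is wider (32 + 30 = 62 bits per DATETIME_UPPER_MASK).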
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
new file mode 100644
index 000000000000..d43acd3920ed
--- /dev/null
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Nuvoton Technology Corporation
+
+#include <linux/bcd.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define NCT3018Y_REG_SC 0x00 /* seconds */
+#define NCT3018Y_REG_SCA 0x01 /* alarm */
+#define NCT3018Y_REG_MN 0x02
+#define NCT3018Y_REG_MNA 0x03 /* alarm */
+#define NCT3018Y_REG_HR 0x04
+#define NCT3018Y_REG_HRA 0x05 /* alarm */
+#define NCT3018Y_REG_DW 0x06
+#define NCT3018Y_REG_DM 0x07
+#define NCT3018Y_REG_MO 0x08
+#define NCT3018Y_REG_YR 0x09
+#define NCT3018Y_REG_CTRL 0x0A /* timer control */
+#define NCT3018Y_REG_ST 0x0B /* status */
+#define NCT3018Y_REG_CLKO 0x0C /* clock out */
+
+#define NCT3018Y_BIT_AF BIT(7)
+#define NCT3018Y_BIT_ST BIT(7)
+#define NCT3018Y_BIT_DM BIT(6)
+#define NCT3018Y_BIT_HF BIT(5)
+#define NCT3018Y_BIT_DSM BIT(4)
+#define NCT3018Y_BIT_AIE BIT(3)
+#define NCT3018Y_BIT_OFIE BIT(2)
+#define NCT3018Y_BIT_CIE BIT(1)
+#define NCT3018Y_BIT_TWO BIT(0)
+
+#define NCT3018Y_REG_BAT_MASK 0x07
+#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
+#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enabled */
+
+struct nct3018y {
+ struct rtc_device *rtc;
+ struct i2c_client *client;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+static int nct3018y_set_alarm_mode(struct i2c_client *client, bool on)
+{
+ int err, flags;
+
+ dev_dbg(&client->dev, "%s:on:%d\n", __func__, on);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_CTRL\n");
+ return flags;
+ }
+
+ if (on)
+ flags |= NCT3018Y_BIT_AIE;
+ else
+ flags &= ~NCT3018Y_BIT_AIE;
+
+ flags |= NCT3018Y_BIT_CIE;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_ST\n");
+ return flags;
+ }
+
+ flags &= ~(NCT3018Y_BIT_AF);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_ST\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *alarm_enable,
+ unsigned char *alarm_flag)
+{
+ int flags;
+
+ if (alarm_enable) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_CTRL\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0)
+ return flags;
+ *alarm_enable = flags & NCT3018Y_BIT_AIE;
+ }
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_ST\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0)
+ return flags;
+ *alarm_flag = flags & NCT3018Y_BIT_AF;
+ }
+
+ dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
+ __func__, *alarm_enable, *alarm_flag);
+
+ return 0;
+}
+
+static irqreturn_t nct3018y_irq(int irq, void *dev_id)
+{
+ struct nct3018y *nct3018y = i2c_get_clientdata(dev_id);
+ struct i2c_client *client = nct3018y->client;
+ int err;
+ unsigned char alarm_flag;
+ unsigned char alarm_enable;
+
+ dev_dbg(&client->dev, "%s:irq:%d\n", __func__, irq);
+ err = nct3018y_get_alarm_mode(nct3018y->client, &alarm_enable, &alarm_flag);
+ if (err)
+ return IRQ_NONE;
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:alarm flag:%x\n",
+ __func__, alarm_flag);
+ rtc_update_irq(nct3018y->rtc, 1, RTC_IRQF | RTC_AF);
+ nct3018y_set_alarm_mode(nct3018y->client, 0);
+ dev_dbg(&client->dev, "%s:IRQ_HANDLED\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * In the routines that deal directly with the nct3018y hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[10];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_ST, 1, buf);
+ if (err < 0)
+ return err;
+
+ if (!buf[0]) {
+ dev_dbg(&client->dev, "voltage <= 1.7V, date/time is not reliable\n");
+ return -EINVAL;
+ }
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SC, sizeof(buf), buf);
+ if (err < 0)
+ return err;
+
+ tm->tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[4] & 0x3F);
+ tm->tm_wday = buf[6] & 0x07;
+ tm->tm_mday = bcd2bin(buf[7] & 0x3F);
+ tm->tm_mon = bcd2bin(buf[8] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[9]) + 100;
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[4] = {0};
+ int err;
+
+ buf[0] = bin2bcd(tm->tm_sec);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SC\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_min);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MN, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MN\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_hour);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HR, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HR\n");
+ return err;
+ }
+
+ buf[0] = tm->tm_wday & 0x07;
+ buf[1] = bin2bcd(tm->tm_mday);
+ buf[2] = bin2bcd(tm->tm_mon + 1);
+ buf[3] = bin2bcd(tm->tm_year - 100);
+ err = i2c_smbus_write_i2c_block_data(client, NCT3018Y_REG_DW,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write for day and mon and year\n");
+ return -EIO;
+ }
+
+ return err;
+}
+
+static int nct3018y_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[5];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SCA,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x hr=%02x\n",
+ __func__, buf[0], buf[2], buf[4]);
+
+ tm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->time.tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->time.tm_hour = bcd2bin(buf[4] & 0x3F);
+
+ err = nct3018y_get_alarm_mode(client, &tm->enabled, &tm->pending);
+ if (err < 0)
+ return err;
+
+ dev_dbg(&client->dev, "%s:s=%d m=%d, hr=%d, enabled=%d, pending=%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min,
+ tm->time.tm_hour, tm->enabled, tm->pending);
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+
+ dev_dbg(dev, "%s, sec=%d, min=%d hour=%d tm->enabled:%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min, tm->time.tm_hour,
+ tm->enabled);
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SCA, bin2bcd(tm->time.tm_sec));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SCA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MNA, bin2bcd(tm->time.tm_min));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MNA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HRA, bin2bcd(tm->time.tm_hour));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HRA\n");
+ return err;
+ }
+
+ return nct3018y_set_alarm_mode(client, tm->enabled);
+}
+
+static int nct3018y_irq_enable(struct device *dev, unsigned int enabled)
+{
+ dev_dbg(dev, "%s: alarm enable=%d\n", __func__, enabled);
+
+ return nct3018y_set_alarm_mode(to_i2c_client(dev), enabled);
+}
+
+static int nct3018y_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int status, flags = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ status = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (status < 0)
+ return status;
+
+ if (!(status & NCT3018Y_REG_BAT_MASK))
+ flags |= RTC_VL_DATA_INVALID;
+
+ return put_user(flags, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMMON_CLK
+/*
+ * Handling of the clkout
+ */
+
+#define clkout_hw_to_nct3018y(_hw) container_of(_hw, struct nct3018y, clkout_hw)
+
+static const int clkout_rates[] = {
+ 32768,
+ 1024,
+ 32,
+ 1,
+};
+
+static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return 0;
+
+ flags &= NCT3018Y_REG_CLKO_F_MASK;
+ return clkout_rates[flags];
+}
+
+static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int i, flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate) {
+ flags &= ~NCT3018Y_REG_CLKO_F_MASK;
+ flags |= i;
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+ }
+
+ return -EINVAL;
+}
+
+static int nct3018y_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ if (enable)
+ flags |= NCT3018Y_REG_CLKO_CKE;
+ else
+ flags &= ~NCT3018Y_REG_CLKO_CKE;
+
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+}
+
+static int nct3018y_clkout_prepare(struct clk_hw *hw)
+{
+ return nct3018y_clkout_control(hw, 1);
+}
+
+static void nct3018y_clkout_unprepare(struct clk_hw *hw)
+{
+ nct3018y_clkout_control(hw, 0);
+}
+
+static int nct3018y_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ return flags & NCT3018Y_REG_CLKO_CKE;
+}
+
+static const struct clk_ops nct3018y_clkout_ops = {
+ .prepare = nct3018y_clkout_prepare,
+ .unprepare = nct3018y_clkout_unprepare,
+ .is_prepared = nct3018y_clkout_is_prepared,
+ .recalc_rate = nct3018y_clkout_recalc_rate,
+ .round_rate = nct3018y_clkout_round_rate,
+ .set_rate = nct3018y_clkout_set_rate,
+};
+
+static struct clk *nct3018y_clkout_register_clk(struct nct3018y *nct3018y)
+{
+ struct i2c_client *client = nct3018y->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = "nct3018y-clkout";
+ init.ops = &nct3018y_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ nct3018y->clkout_hw.init = &init;
+
+ /* optional override of the clockname */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &nct3018y->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
+static const struct rtc_class_ops nct3018y_rtc_ops = {
+ .read_time = nct3018y_rtc_read_time,
+ .set_time = nct3018y_rtc_set_time,
+ .read_alarm = nct3018y_rtc_read_alarm,
+ .set_alarm = nct3018y_rtc_set_alarm,
+ .alarm_irq_enable = nct3018y_irq_enable,
+ .ioctl = nct3018y_ioctl,
+};
+
+static int nct3018y_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nct3018y *nct3018y;
+ int err, flags;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ nct3018y = devm_kzalloc(&client->dev, sizeof(struct nct3018y),
+ GFP_KERNEL);
+ if (!nct3018y)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, nct3018y);
+ nct3018y->client = client;
+ device_set_wakeup_capable(&client->dev, 1);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev, "%s: read error\n", __func__);
+ return flags;
+ } else if (flags & NCT3018Y_BIT_TWO) {
+ dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
+ }
+
+ flags = NCT3018Y_BIT_TWO;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = 0;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "%s: write error\n", __func__);
+ return err;
+ }
+
+ nct3018y->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(nct3018y->rtc))
+ return PTR_ERR(nct3018y->rtc);
+
+ nct3018y->rtc->ops = &nct3018y_rtc_ops;
+ nct3018y->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ nct3018y->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ if (client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, nct3018y_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ "nct3018y", client);
+ if (err) {
+ dev_dbg(&client->dev, "unable to request IRQ %d\n", client->irq);
+ return err;
+ }
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, nct3018y->rtc->features);
+ clear_bit(RTC_FEATURE_ALARM, nct3018y->rtc->features);
+ }
+
+#ifdef CONFIG_COMMON_CLK
+ /* register clk in common clk framework */
+ nct3018y_clkout_register_clk(nct3018y);
+#endif
+
+ return devm_rtc_register_device(nct3018y->rtc);
+}
+
+static const struct i2c_device_id nct3018y_id[] = {
+ { "nct3018y", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nct3018y_id);
+
+static const struct of_device_id nct3018y_of_match[] = {
+ { .compatible = "nuvoton,nct3018y" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nct3018y_of_match);
+
+static struct i2c_driver nct3018y_driver = {
+ .driver = {
+ .name = "rtc-nct3018y",
+ .of_match_table = of_match_ptr(nct3018y_of_match),
+ },
+ .probe = nct3018y_probe,
+ .id_table = nct3018y_id,
+};
+
+module_i2c_driver(nct3018y_driver);
+
+MODULE_AUTHOR("Medad CChien <ctcchien@nuvoton.com>");
+MODULE_AUTHOR("Mia Lin <mimi05633@gmail.com>");
+MODULE_DESCRIPTION("Nuvoton NCT3018Y RTC driver");
+MODULE_LICENSE("GPL");
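
One note on the clkout registration: devm_clk_register() ties the clk's lifetime to the device, but of_clk_add_provider() as used here is not devres-managed, so the provider would outlive a driver unbind. A fully managed alternative exists; this is a sketch of that option, not what the driver does:

	int ret;

	ret = devm_clk_hw_register(&client->dev, &nct3018y->clkout_hw);
	if (ret)
		return ret;

	/* provider is removed automatically on unbind */
	return devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
					   &nct3018y->clkout_hw);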
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index b1b1943de844..6174b3fd4b98 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -390,8 +390,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x13,
};
-static int pcf8523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8523_probe(struct i2c_client *client)
{
struct pcf8523 *pcf8523;
struct rtc_device *rtc;
@@ -485,7 +484,7 @@ static struct i2c_driver pcf8523_driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
},
- .probe = pcf8523_probe,
+ .probe_new = pcf8523_probe,
.id_table = pcf8523_id,
};
module_i2c_driver(pcf8523_driver);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index bb3e9ba75f6c..c05b722f0060 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -350,8 +350,7 @@ static const struct pcf85x63_config pcf_85363_config = {
.num_nvram = 2
};
-static int pcf85363_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf85363_probe(struct i2c_client *client)
{
struct pcf85363 *pcf85363;
const struct pcf85x63_config *config = &pcf_85363_config;
@@ -436,7 +435,7 @@ static struct i2c_driver pcf85363_driver = {
.name = "pcf85363",
.of_match_table = of_match_ptr(dev_ids),
},
- .probe = pcf85363_probe,
+ .probe_new = pcf85363_probe,
};
module_i2c_driver(pcf85363_driver);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 9d06813e2e6d..11fa9788558b 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -509,8 +509,7 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
.alarm_irq_enable = pcf8563_irq_enable,
};
-static int pcf8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8563_probe(struct i2c_client *client)
{
struct pcf8563 *pcf8563;
int err;
@@ -606,7 +605,7 @@ static struct i2c_driver pcf8563_driver = {
.name = "rtc-pcf8563",
.of_match_table = of_match_ptr(pcf8563_of_match),
},
- .probe = pcf8563_probe,
+ .probe_new = pcf8563_probe,
.id_table = pcf8563_id,
};
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index c80ca20e5d8d..87074d178274 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -275,8 +275,7 @@ static const struct rtc_class_ops pcf8583_rtc_ops = {
.set_time = pcf8583_rtc_set_time,
};
-static int pcf8583_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8583_probe(struct i2c_client *client)
{
struct pcf8583 *pcf8583;
@@ -307,7 +306,7 @@ static struct i2c_driver pcf8583_driver = {
.driver = {
.name = "pcf8583",
},
- .probe = pcf8583_probe,
+ .probe_new = pcf8583_probe,
.id_table = pcf8583_id,
};
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 8cb84c9595fc..eb483a30bd92 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -784,8 +784,7 @@ static const struct regmap_config config = {
#if IS_ENABLED(CONFIG_I2C)
-static int rv3029_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rv3029_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
@@ -819,7 +818,7 @@ static struct i2c_driver rv3029_driver = {
.name = "rv3029",
.of_match_table = of_match_ptr(rv3029_of_match),
},
- .probe = rv3029_i2c_probe,
+ .probe_new = rv3029_i2c_probe,
.id_table = rv3029_id,
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index f69e0b1137cd..3527a0521e9b 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -9,6 +9,7 @@
#include <linux/bcd.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/log2.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define RV8803_EXT 0x0D
#define RV8803_FLAG 0x0E
#define RV8803_CTRL 0x0F
+#define RV8803_OSC_OFFSET 0x2C
#define RV8803_EXT_WADA BIT(6)
@@ -49,12 +51,15 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8803_CTRL_CSEL GENMASK(7, 6)
+
#define RX8900_BACKUP_CTRL 0x18
#define RX8900_FLAG_SWOFF BIT(2)
#define RX8900_FLAG_VDETOFF BIT(3)
enum rv8803_type {
rv_8803,
+ rx_8803,
rx_8804,
rx_8900
};
@@ -64,6 +69,7 @@ struct rv8803_data {
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ u8 backup;
enum rv8803_type type;
};
@@ -136,6 +142,44 @@ static int rv8803_write_regs(const struct i2c_client *client,
return ret;
}
+static int rv8803_regs_init(struct rv8803_data *rv8803)
+{
+ int ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_OSC_OFFSET, 0x00);
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ FIELD_PREP(RX8803_CTRL_CSEL, 1)); /* 2s */
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_regs(rv8803->client, RV8803_ALARM_MIN, 3,
+ (u8[]){ 0, 0, 0 });
+ if (ret)
+ return ret;
+
+ return rv8803_write_reg(rv8803->client, RV8803_RAM, 0x00);
+}
+
+static int rv8803_regs_configure(struct rv8803_data *rv8803);
+
+static int rv8803_regs_reset(struct rv8803_data *rv8803)
+{
+ /*
+ * The RV-8803 resets all registers to POR defaults after voltage-loss,
+ * the Epson RTCs don't, so we manually reset the remainder here.
+ */
+ if (rv8803->type == rx_8803 || rv8803->type == rx_8900) {
+ int ret = rv8803_regs_init(rv8803);
+ if (ret)
+ return ret;
+ }
+
+ return rv8803_regs_configure(rv8803);
+}
+
static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -269,6 +313,14 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
return flags;
}
+ if (flags & RV8803_FLAG_V2F) {
+ ret = rv8803_regs_reset(rv8803);
+ if (ret) {
+ mutex_unlock(&rv8803->flags_lock);
+ return ret;
+ }
+ }
+
ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
flags & ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F));
@@ -498,18 +550,32 @@ static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
if (err < 0)
return err;
- flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
-
- if (of_property_read_bool(node, "epson,vdet-disable"))
- flags |= RX8900_FLAG_VDETOFF;
-
- if (of_property_read_bool(node, "trickle-diode-disable"))
- flags |= RX8900_FLAG_SWOFF;
+ flags = (u8)err;
+ flags &= ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF);
+ flags |= rv8803->backup;
return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
flags);
}
+/* configure registers with values different than the Power-On reset defaults */
+static int rv8803_regs_configure(struct rv8803_data *rv8803)
+{
+ int err;
+
+ err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
+ if (err)
+ return err;
+
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&rv8803->client->dev, "failed to init charger\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -576,15 +642,15 @@ static int rv8803_probe(struct i2c_client *client,
if (!client->irq)
clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
- err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
- if (err)
- return err;
+ if (of_property_read_bool(client->dev.of_node, "epson,vdet-disable"))
+ rv8803->backup |= RX8900_FLAG_VDETOFF;
- err = rx8900_trickle_charger_init(rv8803);
- if (err) {
- dev_err(&client->dev, "failed to init charger\n");
+ if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable"))
+ rv8803->backup |= RX8900_FLAG_SWOFF;
+
+ err = rv8803_regs_configure(rv8803);
+ if (err)
return err;
- }
rv8803->rtc->ops = &rv8803_rtc_ops;
rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
@@ -603,7 +669,7 @@ static int rv8803_probe(struct i2c_client *client,
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
{ "rv8804", rx_8804 },
- { "rx8803", rv_8803 },
+ { "rx8803", rx_8803 },
{ "rx8900", rx_8900 },
{ }
};
@@ -616,7 +682,7 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = {
},
{
.compatible = "epson,rx8803",
- .data = (void *)rv_8803
+ .data = (void *)rx_8803
},
{
.compatible = "epson,rx8804",
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 758fd6e11a15..cc634558b928 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -419,8 +419,7 @@ static struct regmap_config regmap_i2c_config = {
.read_flag_mask = 0x80,
};
-static int rx6110_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx6110_i2c_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct rx6110_data *rx6110;
@@ -464,7 +463,7 @@ static struct i2c_driver rx6110_i2c_driver = {
.name = RX6110_DRIVER_NAME,
.acpi_match_table = rx6110_i2c_acpi_match,
},
- .probe = rx6110_i2c_probe,
+ .probe_new = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
};
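The probe() -> probe_new() conversion above recurs for several more drivers
below (rx8581, s35390a, sd3078, x1205). As a sketch of the general pattern
(a hypothetical "foo" driver; foo_id and foo_init() are placeholders, not
code from any of these patches): probe_new() drops the i2c_device_id
argument, and a driver that still needs the match data can recover it with
i2c_match_id():

	static int foo_probe(struct i2c_client *client)
	{
		/* Only needed if the driver still uses the match data */
		const struct i2c_device_id *id = i2c_match_id(foo_id, client);

		return foo_init(client, id ? id->driver_data : 0);
	}

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.probe_new	= foo_probe,	/* instead of .probe */
		.id_table	= foo_id,
	};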
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b32117ccd74b..dde86f3e2a4b 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -55,6 +55,8 @@
#define RX8025_BIT_CTRL2_XST BIT(5)
#define RX8025_BIT_CTRL2_VDET BIT(6)
+#define RX8035_BIT_HOUR_1224 BIT(7)
+
/* Clock precision adjustment */
#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
#define RX8025_ADJ_DATA_MAX 62
@@ -78,6 +80,7 @@ struct rx8025_data {
struct rtc_device *rtc;
enum rx_model model;
u8 ctrl1;
+ int is_24;
};
static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
@@ -226,7 +229,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
else
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
@@ -254,7 +257,7 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
*/
date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
else
date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
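For readers unfamiliar with the 12-hour register layout used here: bit 5
(0x20) is the PM flag and bits 4:0 hold hours 1-12 in BCD, which is why the
decode path uses "% 12" plus a 12-hour PM offset. A standalone sketch
(illustration only, not part of the patch) of the decode direction:

	/*
	 * E.g. reg == 0x32 -> PM flag set, BCD 12 -> 12 % 12 = 0, +12 = 12
	 * (noon); reg == 0x12 -> AM, 12 % 12 = 0 -> 0 (midnight).
	 */
	static int rx8025_hour12_to_24(u8 reg)
	{
		return bcd2bin(reg & 0x1f) % 12 + ((reg & 0x20) ? 12 : 0);
	}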
@@ -279,6 +282,7 @@ static int rx8025_init_client(struct i2c_client *client)
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
u8 ctrl[2], ctrl2;
int need_clear = 0;
+ int hour_reg;
int err;
err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
@@ -303,6 +307,16 @@ static int rx8025_init_client(struct i2c_client *client)
err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
}
+
+ if (rx8025->model == model_rx_8035) {
+ /* In RX-8035, 12/24 flag is in the hour register */
+ hour_reg = rx8025_read_reg(client, RX8025_REG_HOUR);
+ if (hour_reg < 0)
+ return hour_reg;
+ rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
+ } else {
+ rx8025->is_24 = (ctrl[1] & RX8025_BIT_CTRL1_1224);
+ }
out:
return err;
}
@@ -329,7 +343,7 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
/* Hardware alarms precision is 1 minute! */
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(ald[0] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
else
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
@@ -350,7 +364,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
int err;
ald[0] = bin2bcd(t->time.tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
ald[1] = bin2bcd(t->time.tm_hour);
else
ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index aed4898a0ff4..14edb7534c97 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -248,8 +248,7 @@ static const struct rx85x1_config rx8571_config = {
.num_nvram = 2
};
-static int rx8581_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8581_probe(struct i2c_client *client)
{
struct rx8581 *rx8581;
const struct rx85x1_config *config = &rx8581_config;
@@ -326,7 +325,7 @@ static struct i2c_driver rx8581_driver = {
.name = "rtc-rx8581",
.of_match_table = of_match_ptr(rx8581_of_match),
},
- .probe = rx8581_probe,
+ .probe_new = rx8581_probe,
.id_table = rx8581_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 26278c770731..81d97b1d3159 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -420,8 +420,7 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static int s35390a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s35390a_probe(struct i2c_client *client)
{
int err, err_read;
unsigned int i;
@@ -502,7 +501,7 @@ static struct i2c_driver s35390a_driver = {
.name = "rtc-s35390a",
.of_match_table = of_match_ptr(s35390a_of_match),
},
- .probe = s35390a_probe,
+ .probe_new = s35390a_probe,
.id_table = s35390a_id,
};
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index 24e8528e23ec..e2f90d768ca8 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -163,8 +163,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x11,
};
-static int sd3078_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sd3078_probe(struct i2c_client *client)
{
int ret;
struct sd3078 *sd3078;
@@ -218,7 +217,7 @@ static struct i2c_driver sd3078_driver = {
.name = "sd3078",
.of_match_table = of_match_ptr(rtc_dt_match),
},
- .probe = sd3078_probe,
+ .probe_new = sd3078_probe,
.id_table = sd3078_id,
};
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index d4777b01ab22..736fe535cd45 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -388,7 +388,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
config->rtc->ops = &spear_rtc_ops;
config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
- config->rtc->range_min = RTC_TIMESTAMP_END_9999;
+ config->rtc->range_max = RTC_TIMESTAMP_END_9999;
status = devm_rtc_register_device(config->rtc);
if (status)
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 57540727ce1c..ed5516089e9a 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -875,6 +875,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun50i-h6-rtc" },
{ .compatible = "allwinner,sun50i-h616-rtc",
.data = (void *)RTC_LINEAR_DAY },
+ { .compatible = "allwinner,sun50i-r329-rtc",
+ .data = (void *)RTC_LINEAR_DAY },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
new file mode 100644
index 000000000000..7a0f181d3fef
--- /dev/null
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments K3 RTC driver
+ *
+ * Copyright (C) 2021-2022 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Registers */
+#define REG_K3RTC_S_CNT_LSW 0x08
+#define REG_K3RTC_S_CNT_MSW 0x0c
+#define REG_K3RTC_COMP 0x10
+#define REG_K3RTC_ON_OFF_S_CNT_LSW 0x20
+#define REG_K3RTC_ON_OFF_S_CNT_MSW 0x24
+#define REG_K3RTC_SCRATCH0 0x30
+#define REG_K3RTC_SCRATCH7 0x4c
+#define REG_K3RTC_GENERAL_CTL 0x50
+#define REG_K3RTC_IRQSTATUS_RAW_SYS 0x54
+#define REG_K3RTC_IRQSTATUS_SYS 0x58
+#define REG_K3RTC_IRQENABLE_SET_SYS 0x5c
+#define REG_K3RTC_IRQENABLE_CLR_SYS 0x60
+#define REG_K3RTC_SYNCPEND 0x68
+#define REG_K3RTC_KICK0 0x70
+#define REG_K3RTC_KICK1 0x74
+
+/* Freeze when lsw is read and unfreeze when msw is read */
+#define K3RTC_CNT_FMODE_S_CNT_VALUE (0x2 << 24)
+
+/* Magic values for lock/unlock */
+#define K3RTC_KICK0_UNLOCK_VALUE 0x83e70b13
+#define K3RTC_KICK1_UNLOCK_VALUE 0x95a4f1e0
+
+/* Multiplier for ppb conversions */
+#define K3RTC_PPB_MULT (1000000000LL)
+/* Min and max values supported with 'offset' interface (swapped sign) */
+#define K3RTC_MIN_OFFSET (-277761)
+#define K3RTC_MAX_OFFSET (277778)
+
+/**
+ * struct ti_k3_rtc_soc_data - Per-compatible (SoC) match data for ti-k3-rtc
+ * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327)
+ */
+struct ti_k3_rtc_soc_data {
+ const bool unlock_irq_erratum;
+};
+
+static const struct regmap_config ti_k3_rtc_regmap_config = {
+ .name = "peripheral-registers",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_K3RTC_KICK1,
+};
+
+enum ti_k3_rtc_fields {
+ K3RTC_KICK0,
+ K3RTC_KICK1,
+ K3RTC_S_CNT_LSW,
+ K3RTC_S_CNT_MSW,
+ K3RTC_O32K_OSC_DEP_EN,
+ K3RTC_UNLOCK,
+ K3RTC_CNT_FMODE,
+ K3RTC_PEND,
+ K3RTC_RELOAD_FROM_BBD,
+ K3RTC_COMP,
+
+ K3RTC_ALM_S_CNT_LSW,
+ K3RTC_ALM_S_CNT_MSW,
+ K3RTC_IRQ_STATUS_RAW,
+ K3RTC_IRQ_STATUS,
+ K3RTC_IRQ_ENABLE_SET,
+ K3RTC_IRQ_ENABLE_CLR,
+
+ K3RTC_IRQ_STATUS_ALT,
+ K3RTC_IRQ_ENABLE_CLR_ALT,
+
+ K3_RTC_MAX_FIELDS
+};
+
+static const struct reg_field ti_rtc_reg_fields[] = {
+ [K3RTC_KICK0] = REG_FIELD(REG_K3RTC_KICK0, 0, 31),
+ [K3RTC_KICK1] = REG_FIELD(REG_K3RTC_KICK1, 0, 31),
+ [K3RTC_S_CNT_LSW] = REG_FIELD(REG_K3RTC_S_CNT_LSW, 0, 31),
+ [K3RTC_S_CNT_MSW] = REG_FIELD(REG_K3RTC_S_CNT_MSW, 0, 15),
+ [K3RTC_O32K_OSC_DEP_EN] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 21, 21),
+ [K3RTC_UNLOCK] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 23, 23),
+ [K3RTC_CNT_FMODE] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 24, 25),
+ [K3RTC_PEND] = REG_FIELD(REG_K3RTC_SYNCPEND, 0, 1),
+ [K3RTC_RELOAD_FROM_BBD] = REG_FIELD(REG_K3RTC_SYNCPEND, 31, 31),
+ [K3RTC_COMP] = REG_FIELD(REG_K3RTC_COMP, 0, 31),
+
+	/* We use the ON-to-OFF event as the alarm trigger */
+ [K3RTC_ALM_S_CNT_LSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_LSW, 0, 31),
+ [K3RTC_ALM_S_CNT_MSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_MSW, 0, 15),
+ [K3RTC_IRQ_STATUS_RAW] = REG_FIELD(REG_K3RTC_IRQSTATUS_RAW_SYS, 0, 0),
+ [K3RTC_IRQ_STATUS] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_SET] = REG_FIELD(REG_K3RTC_IRQENABLE_SET_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_CLR] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 0, 0),
+	/* OFF-to-ON is the alternate event */
+ [K3RTC_IRQ_STATUS_ALT] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 1, 1),
+ [K3RTC_IRQ_ENABLE_CLR_ALT] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 1, 1),
+};
+
+/**
+ * struct ti_k3_rtc - Private data for ti-k3-rtc
+ * @irq: IRQ
+ * @sync_timeout_us: data sync timeout period in usec
+ * @rate_32k: 32k clock rate in Hz
+ * @rtc_dev: rtc device
+ * @regmap: rtc mmio regmap
+ * @r_fields: rtc register fields
+ * @soc: SoC compatible match data
+ */
+struct ti_k3_rtc {
+ unsigned int irq;
+ u32 sync_timeout_us;
+ unsigned long rate_32k;
+ struct rtc_device *rtc_dev;
+ struct regmap *regmap;
+ struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
+ const struct ti_k3_rtc_soc_data *soc;
+};
+
+static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
+{
+ int ret;
+ int val;
+
+ ret = regmap_field_read(priv->r_fields[f], &val);
+ /*
+	 * We shouldn't see regmap fail on MMIO reads; that could only happen
+	 * if the clock context were lost, which isn't the case here.
+ */
+ if (WARN_ON_ONCE(ret))
+ return ret;
+ return val;
+}
+
+static void k3rtc_field_write(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f, u32 val)
+{
+ regmap_field_write(priv->r_fields[f], val);
+}
+
+/**
+ * k3rtc_fence - Ensure a register sync took place between the two domains
+ * @priv: pointer to priv data
+ *
+ * Return: 0 if the sync took place, else returns -ETIMEDOUT
+ */
+static int k3rtc_fence(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_PEND], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
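The regmap_field_read_poll_timeout() call above reuses "ret" as both the
value read back and the poll condition, which is compact but easy to
misread. Roughly equivalent open-coded logic, as a simplified sketch
(illustration only; the real macro uses ktime-based timeout accounting):

	static int k3rtc_fence_open_coded(struct ti_k3_rtc *priv)
	{
		u32 waited = 0;
		unsigned int pend;

		do {
			int err = regmap_field_read(priv->r_fields[K3RTC_PEND], &pend);

			if (err)
				return err;
			if (!pend)	/* no write pending: sync is done */
				return 0;
			usleep_range(2, 4);
			waited += 2;
		} while (waited < priv->sync_timeout_us);

		return -ETIMEDOUT;
	}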
+
+static inline int k3rtc_check_unlocked(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_field_read(priv, K3RTC_UNLOCK);
+ if (ret < 0)
+ return ret;
+
+ return (ret) ? 0 : 1;
+}
+
+static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_check_unlocked(priv);
+ if (!ret)
+ return ret;
+
+ k3rtc_field_write(priv, K3RTC_KICK0, K3RTC_KICK0_UNLOCK_VALUE);
+ k3rtc_field_write(priv, K3RTC_KICK1, K3RTC_KICK1_UNLOCK_VALUE);
+
+	/* Skip the fence; polling the unlock bit itself serves as the fence */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+static int k3rtc_configure(struct device *dev)
+{
+ int ret;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ /*
+ * HWBUG: The compare state machine is broken if the RTC module
+	 * is NOT unlocked within one second of boot - which is a long time
+	 * from the perspective of a Linux driver (module load, a U-Boot
+	 * shell, etc. can all take much longer than that).
+	 *
+	 * If that happens, the RTC module is assumed to be unusable.
+ */
+ if (priv->soc->unlock_irq_erratum) {
+ ret = k3rtc_check_unlocked(priv);
+ /* If there is an error OR if we are locked, return error */
+ if (ret) {
+ dev_err(dev,
+ HW_ERR "Erratum i2327 unlock QUIRK! Cannot operate!!\n");
+ return -EFAULT;
+ }
+ } else {
+ /* May need to explicitly unlock first time */
+ ret = k3rtc_unlock_rtc(priv);
+ if (ret) {
+ dev_err(dev, "Failed to unlock(%d)!\n", ret);
+ return ret;
+ }
+ }
+
+ /* Enable Shadow register sync on 32k clock boundary */
+ k3rtc_field_write(priv, K3RTC_O32K_OSC_DEP_EN, 0x1);
+
+ /*
+	 * Wait at least one clock-sync period before any further programming;
+	 * this ensures that the 32k-based sync is active.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 5);
+
+	/* Fence to make sure the above write has actually synced */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev,
+ "Failed fence osc_dep enable(%d) - is 32k clk working?!\n", ret);
+ return ret;
+ }
+
+ /*
+	 * FMODE setting: reading the lower seconds word freezes the value of
+	 * the higher seconds word. This implies that we must *ALWAYS* read
+	 * the lower seconds word before the higher one.
+ */
+ k3rtc_field_write(priv, K3RTC_CNT_FMODE, K3RTC_CNT_FMODE_S_CNT_VALUE);
+
+	/* Clear any spurious IRQ sources */
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS, 0x1);
+ /* Disable all IRQs */
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR, 0x1);
+
+	/* And sync the writes in */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, tm);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+
+ seconds = rtc_tm_to_time64(tm);
+
+ /*
+	 * A read of the LSW freezes the counter, so we cannot use field
+	 * operations (read-modify-write) to update the time. Plain writes
+	 * are fine since the reserved bits are ignored.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_LSW, seconds);
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_MSW, seconds >> 32);
+
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ u32 offset = enabled ? K3RTC_IRQ_ENABLE_SET : K3RTC_IRQ_ENABLE_CLR;
+
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+ if ((enabled && reg) || (!enabled && !reg))
+ return 0;
+
+ k3rtc_field_write(priv, offset, 0x1);
+
+ /*
+	 * Ensure the write sync is through - NOTE: it is OK for the ISR to
+	 * fire while we are checking the sync (which should complete within
+	 * a 32k cycle or so).
+ */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, &alarm->time);
+
+ alarm->enabled = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+ int ret;
+
+ seconds = rtc_tm_to_time64(&alarm->time);
+
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_LSW, seconds);
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_MSW, (seconds >> 32));
+
+ /* Make sure the alarm time is synced in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence(%d)! Potential config issue?\n", ret);
+ return ret;
+ }
+
+ /* Alarm IRQ enable will do a sync */
+ return ti_k3_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static int ti_k3_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ comp = k3rtc_field_read(priv, K3RTC_COMP);
+
+ /* Convert from RTC calibration register format to ppb format */
+ tmp = comp * (s64)K3RTC_PPB_MULT;
+ if (tmp < 0)
+ tmp -= ticks_per_hr / 2LL;
+ else
+ tmp += ticks_per_hr / 2LL;
+ tmp = div_s64(tmp, ticks_per_hr);
+
+ /* Offset value operates in negative way, so swap sign */
+ *offset = (long)-tmp;
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_offset(struct device *dev, long offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ /* Make sure offset value is within supported range */
+ if (offset < K3RTC_MIN_OFFSET || offset > K3RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Convert from ppb format to RTC calibration register format */
+ tmp = offset * (s64)ticks_per_hr;
+ if (tmp < 0)
+ tmp -= K3RTC_PPB_MULT / 2LL;
+ else
+ tmp += K3RTC_PPB_MULT / 2LL;
+ tmp = div_s64(tmp, K3RTC_PPB_MULT);
+
+ /* Offset value operates in negative way, so swap sign */
+ comp = (int)-tmp;
+
+ k3rtc_field_write(priv, K3RTC_COMP, comp);
+
+ return k3rtc_fence(priv);
+}
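A worked example of the ppb <-> COMP conversion (illustration only, assuming
an exact 32768 Hz clock):

	ticks_per_hr = 32768 * 3600 = 117964800
	resolution   = 1e9 / 117964800 ~= 8.477 ppb per COMP step

	set_offset(+10000 ppb):
	  tmp  = 10000 * 117964800 + 500000000 = 1180148000000
	  tmp /= 1e9  ->  1180
	  COMP = -1180		(the register operates with inverted sign)

	read_offset() on COMP == -1180 then yields +10003 ppb, i.e. the
	round-trip error stays within one ~8.5 ppb step.

The +/-277xxx ppb offset limits correspond to COMP magnitudes of roughly
32768 ticks per hour at this clock rate.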
+
+static irqreturn_t ti_k3_rtc_interrupt(s32 irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int ret;
+
+ /*
+	 * IRQ assertion can be very fast; however, de-assertion after an
+	 * IRQ status clear depends on a 32k clock edge in the 32k domain.
+	 * If we clear the status before the first 32k clock edge, the
+	 * status bit is cleared, but the IRQ remains asserted.
+ *
+ * To prevent this condition, we need to wait for clock sync time.
+	 * We can either poll the 32k observability signal for a toggle,
+	 * or simply sleep and let the processor do other work.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 2);
+
+	/* Make sure this is a valid interrupt */
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_STATUS);
+
+ if (!reg) {
+ u32 raw = k3rtc_field_read(priv, K3RTC_IRQ_STATUS_RAW);
+
+ dev_err(dev,
+ HW_ERR
+ "Erratum i2327/IRQ trig: status: 0x%08x / 0x%08x\n", reg, raw);
+ return IRQ_NONE;
+ }
+
+ /*
+	 * Write 1 to clear the status register.
+ * We cannot use a field operation here due to a potential race between
+ * 32k domain and vbus domain.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_IRQSTATUS_SYS, 0x1);
+
+ /* Sync the write in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence irq status clr(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+	 * Force the status to be reloaded from the 32k domain so that it is
+	 * reflected correctly.
+ */
+ k3rtc_field_write(priv, K3RTC_RELOAD_FROM_BBD, 0x1);
+
+ /* Ensure the write sync is through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence reload from bbd(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Now we ensure that the status bit is cleared */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_IRQ_STATUS],
+ ret, !ret, 2, priv->sync_timeout_us);
+ if (ret) {
+		dev_err(dev, "Timed out waiting for status clear\n");
+ return IRQ_NONE;
+ }
+
+ /* Notify RTC core on event */
+ rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ti_k3_rtc_ops = {
+ .read_time = ti_k3_rtc_read_time,
+ .set_time = ti_k3_rtc_set_time,
+ .read_alarm = ti_k3_rtc_read_alarm,
+ .set_alarm = ti_k3_rtc_set_alarm,
+ .read_offset = ti_k3_rtc_read_offset,
+ .set_offset = ti_k3_rtc_set_offset,
+ .alarm_irq_enable = ti_k3_rtc_alarm_irq_enable,
+};
+
+static int ti_k3_rtc_scratch_read(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+
+ return regmap_bulk_read(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+}
+
+static int ti_k3_rtc_scratch_write(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+ if (ret)
+ return ret;
+
+ return k3rtc_fence(priv);
+}
+
+static struct nvmem_config ti_k3_rtc_nvmem_config = {
+ .name = "ti_k3_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = REG_K3RTC_SCRATCH7 - REG_K3RTC_SCRATCH0 + 4,
+ .reg_read = ti_k3_rtc_scratch_read,
+ .reg_write = ti_k3_rtc_scratch_write,
+};
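For scale: the scratch area spans REG_K3RTC_SCRATCH0..REG_K3RTC_SCRATCH7, so
the nvmem size works out to 0x4c - 0x30 + 4 = 32 bytes, i.e. eight 32-bit
words, with writes fenced against the 32k domain by k3rtc_fence() in the
write path.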
+
+static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, "osc32k");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ priv->rate_32k = clk_get_rate(clk);
+
+	/* Warn if we are not running from an exact 32768 Hz clock */
+ if (priv->rate_32k != 32768)
+ dev_warn(dev, "Clock rate %ld is not 32768! Could misbehave!\n",
+ priv->rate_32k);
+
+ /*
+	 * The sync timeout should be two 32k clock sync cycles = ~61us. We
+	 * double that to accommodate intermediate bus-segment and CPU
+	 * frequency deltas.
+ */
+ priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
+
+ return ret;
+}
+
+static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+	/* Note: VBUS isn't a context clock; it is needed for hardware operation */
+ clk = devm_clk_get(dev, "vbus");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+}
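The (void (*)(void *)) cast on clk_disable_unprepare() works because the
argument is pointer-compatible, but indirect calls through a cast function
pointer can trip CFI-enabled builds. A common alternative, as a sketch (not
part of this patch; newer kernels can also use devm_clk_get_enabled() to
fold the get/enable/cleanup into one call):

	static void k3rtc_clk_disable(void *data)
	{
		clk_disable_unprepare(data);
	}

	/* ... then, in place of the cast: */
	ret = devm_add_action_or_reset(dev, k3rtc_clk_disable, clk);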
+
+static int ti_k3_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_k3_rtc *priv;
+ void __iomem *rtc_base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct ti_k3_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc_base))
+ return PTR_ERR(rtc_base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, rtc_base, &ti_k3_rtc_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ ret = devm_regmap_field_bulk_alloc(dev, priv->regmap, priv->r_fields,
+ ti_rtc_reg_fields, K3_RTC_MAX_FIELDS);
+ if (ret)
+ return ret;
+
+ ret = k3rtc_get_32kclk(dev, priv);
+ if (ret)
+ return ret;
+ ret = k3rtc_get_vbusclk(dev, priv);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ priv->irq = (unsigned int)ret;
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->soc = of_device_get_match_data(dev);
+
+ priv->rtc_dev->ops = &ti_k3_rtc_ops;
+	priv->rtc_dev->range_max = (1ULL << 48) - 1;	/* 48-bit seconds */
+ ti_k3_rtc_nvmem_config.priv = priv;
+
+ ret = devm_request_threaded_irq(dev, priv->irq, NULL,
+ ti_k3_rtc_interrupt,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "Could not request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = k3rtc_configure(dev);
+ if (ret)
+ return ret;
+
+ if (device_property_present(dev, "wakeup-source"))
+ device_init_wakeup(dev, true);
+ else
+ device_set_wakeup_capable(dev, true);
+
+ ret = devm_rtc_register_device(priv->rtc_dev);
+ if (ret)
+ return ret;
+
+ return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
+}
+
+static const struct ti_k3_rtc_soc_data ti_k3_am62_data = {
+ .unlock_irq_erratum = true,
+};
+
+static const struct of_device_id ti_k3_rtc_of_match_table[] = {
+ {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
+
+static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+ return 0;
+}
+
+static int __maybe_unused ti_k3_rtc_resume(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_k3_rtc_pm_ops, ti_k3_rtc_suspend, ti_k3_rtc_resume);
+
+static struct platform_driver ti_k3_rtc_driver = {
+ .probe = ti_k3_rtc_probe,
+ .driver = {
+ .name = "rtc-ti-k3",
+ .of_match_table = ti_k3_rtc_of_match_table,
+ .pm = &ti_k3_rtc_pm_ops,
+ },
+};
+module_platform_driver(ti_k3_rtc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 RTC driver");
+MODULE_AUTHOR("Nishanth Menon");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
deleted file mode 100644
index 5a9f9ad86d32..000000000000
--- a/drivers/rtc/rtc-vr41xx.c
+++ /dev/null
@@ -1,363 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for NEC VR4100 series Real Time Clock unit.
- *
- * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/compat.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/log2.h>
-
-#include <asm/div64.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
-MODULE_LICENSE("GPL v2");
-
-/* RTC 1 registers */
-#define ETIMELREG 0x00
-#define ETIMEMREG 0x02
-#define ETIMEHREG 0x04
-/* RFU */
-#define ECMPLREG 0x08
-#define ECMPMREG 0x0a
-#define ECMPHREG 0x0c
-/* RFU */
-#define RTCL1LREG 0x10
-#define RTCL1HREG 0x12
-#define RTCL1CNTLREG 0x14
-#define RTCL1CNTHREG 0x16
-#define RTCL2LREG 0x18
-#define RTCL2HREG 0x1a
-#define RTCL2CNTLREG 0x1c
-#define RTCL2CNTHREG 0x1e
-
-/* RTC 2 registers */
-#define TCLKLREG 0x00
-#define TCLKHREG 0x02
-#define TCLKCNTLREG 0x04
-#define TCLKCNTHREG 0x06
-/* RFU */
-#define RTCINTREG 0x1e
- #define TCLOCK_INT 0x08
- #define RTCLONG2_INT 0x04
- #define RTCLONG1_INT 0x02
- #define ELAPSEDTIME_INT 0x01
-
-#define RTC_FREQUENCY 32768
-#define MAX_PERIODIC_RATE 6553
-
-static void __iomem *rtc1_base;
-static void __iomem *rtc2_base;
-
-#define rtc1_read(offset) readw(rtc1_base + (offset))
-#define rtc1_write(offset, value) writew((value), rtc1_base + (offset))
-
-#define rtc2_read(offset) readw(rtc2_base + (offset))
-#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))
-
-/* 32-bit compat for ioctls that nobody else uses */
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)
-
-static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
-
-static DEFINE_SPINLOCK(rtc_lock);
-static char rtc_name[] = "RTC";
-static unsigned long periodic_count;
-static unsigned int alarm_enabled;
-static int aie_irq;
-static int pie_irq;
-
-static inline time64_t read_elapsed_second(void)
-{
-
- unsigned long first_low, first_mid, first_high;
-
- unsigned long second_low, second_mid, second_high;
-
- do {
- first_low = rtc1_read(ETIMELREG);
- first_mid = rtc1_read(ETIMEMREG);
- first_high = rtc1_read(ETIMEHREG);
- second_low = rtc1_read(ETIMELREG);
- second_mid = rtc1_read(ETIMEMREG);
- second_high = rtc1_read(ETIMEHREG);
- } while (first_low != second_low || first_mid != second_mid ||
- first_high != second_high);
-
- return ((u64)first_high << 17) | (first_mid << 1) | (first_low >> 15);
-}
-
-static inline void write_elapsed_second(time64_t sec)
-{
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ETIMELREG, (uint16_t)(sec << 15));
- rtc1_write(ETIMEMREG, (uint16_t)(sec >> 1));
- rtc1_write(ETIMEHREG, (uint16_t)(sec >> 17));
-
- spin_unlock_irq(&rtc_lock);
-}
-
-static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, elapsed_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- elapsed_sec = read_elapsed_second();
-
- rtc_time64_to_tm(epoch_sec + elapsed_sec, time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, current_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- current_sec = rtc_tm_to_time64(time);
-
- write_elapsed_second(current_sec - epoch_sec);
-
- return 0;
-}
-
-static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- unsigned long low, mid, high;
- struct rtc_time *time = &wkalrm->time;
-
- spin_lock_irq(&rtc_lock);
-
- low = rtc1_read(ECMPLREG);
- mid = rtc1_read(ECMPMREG);
- high = rtc1_read(ECMPHREG);
- wkalrm->enabled = alarm_enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- rtc_time64_to_tm((high << 17) | (mid << 1) | (low >> 15), time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- time64_t alarm_sec;
-
- alarm_sec = rtc_tm_to_time64(&wkalrm->time);
-
- spin_lock_irq(&rtc_lock);
-
- if (alarm_enabled)
- disable_irq(aie_irq);
-
- rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15));
- rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1));
- rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17));
-
- if (wkalrm->enabled)
- enable_irq(aie_irq);
-
- alarm_enabled = wkalrm->enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case RTC_EPOCH_READ:
- return put_user(epoch, (unsigned long __user *)arg);
-#ifdef CONFIG_64BIT
- case RTC_EPOCH_READ32:
- return put_user(epoch, (unsigned int __user *)arg);
-#endif
- case RTC_EPOCH_SET:
- /* Doesn't support before 1900 */
- if (arg < 1900)
- return -EINVAL;
- epoch = arg;
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
- return 0;
-}
-
-static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- spin_lock_irq(&rtc_lock);
- if (enabled) {
- if (!alarm_enabled) {
- enable_irq(aie_irq);
- alarm_enabled = 1;
- }
- } else {
- if (alarm_enabled) {
- disable_irq(aie_irq);
- alarm_enabled = 0;
- }
- }
- spin_unlock_irq(&rtc_lock);
- return 0;
-}
-
-static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- rtc2_write(RTCINTREG, ELAPSEDTIME_INT);
-
- rtc_update_irq(rtc, 1, RTC_AF);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
- unsigned long count = periodic_count;
-
- rtc2_write(RTCINTREG, RTCLONG1_INT);
-
- rtc1_write(RTCL1LREG, count);
- rtc1_write(RTCL1HREG, count >> 16);
-
- rtc_update_irq(rtc, 1, RTC_PF);
-
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops vr41xx_rtc_ops = {
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
- .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
-};
-
-static int rtc_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct rtc_device *rtc;
- int retval;
-
- if (pdev->num_resources != 4)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc1_base)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc2_base) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
- goto err_iounmap_all;
- }
-
- rtc->ops = &vr41xx_rtc_ops;
-
- /* 48-bit counter at 32.768 kHz */
- rtc->range_max = (1ULL << 33) - 1;
- rtc->max_user_freq = MAX_PERIODIC_RATE;
-
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ECMPLREG, 0);
- rtc1_write(ECMPMREG, 0);
- rtc1_write(ECMPHREG, 0);
- rtc1_write(RTCL1LREG, 0);
- rtc1_write(RTCL1HREG, 0);
-
- spin_unlock_irq(&rtc_lock);
-
- aie_irq = platform_get_irq(pdev, 0);
- if (aie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0,
- "elapsed_time", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- pie_irq = platform_get_irq(pdev, 1);
- if (pie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0,
- "rtclong1", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- platform_set_drvdata(pdev, rtc);
-
- disable_irq(aie_irq);
- disable_irq(pie_irq);
-
- dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
-
- retval = devm_rtc_register_device(rtc);
- if (retval)
- goto err_iounmap_all;
-
- return 0;
-
-err_iounmap_all:
- rtc2_base = NULL;
-
-err_rtc1_iounmap:
- rtc1_base = NULL;
-
- return retval;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:RTC");
-
-static struct platform_driver rtc_platform_driver = {
- .probe = rtc_probe,
- .driver = {
- .name = rtc_name,
- },
-};
-
-module_platform_driver(rtc_platform_driver);
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index d1d5a44d9122..ba0d22a5b421 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -614,8 +614,7 @@ static void x1205_sysfs_unregister(struct device *dev)
}
-static int x1205_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int x1205_probe(struct i2c_client *client)
{
int err = 0;
unsigned char sr;
@@ -681,7 +680,7 @@ static struct i2c_driver x1205_driver = {
.name = "rtc-x1205",
.of_match_table = x1205_dt_ids,
},
- .probe = x1205_probe,
+ .probe_new = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index f440bb52be92..c9b85c838ebe 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -36,17 +37,23 @@
#define RTC_OSC_EN BIT(24)
#define RTC_BATT_EN BIT(31)
-#define RTC_CALIB_DEF 0x198233
+#define RTC_CALIB_DEF 0x7FFF
#define RTC_CALIB_MASK 0x1FFFFF
#define RTC_ALRM_MASK BIT(1)
#define RTC_MSEC 1000
+#define RTC_FR_MASK 0xF0000
+#define RTC_FR_MAX_TICKS 16
+#define RTC_PPB 1000000000LL
+#define RTC_MIN_OFFSET -32768000
+#define RTC_MAX_OFFSET 32767000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- unsigned int calibval;
+ struct clk *rtc_clk;
+ unsigned int freq;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -61,13 +68,6 @@ static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
*/
new_time = rtc_tm_to_time64(tm) + 1;
- /*
- * Writing into calibration register will clear the Tick Counter and
- * force the next second to be signaled exactly in 1 second period
- */
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
-
writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
/*
@@ -173,15 +173,76 @@ static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev)
rtc_ctrl = readl(xrtcdev->reg_base + RTC_CTRL);
rtc_ctrl |= RTC_BATT_EN;
writel(rtc_ctrl, xrtcdev->reg_base + RTC_CTRL);
+}
- /*
- * Based on crystal freq of 33.330 KHz
- * set the seconds counter and enable, set fractions counter
- * to default value suggested as per design spec
- * to correct RTC delay in frequency over period of time.
+static int xlnx_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned int calibval;
+ long offset_val;
+
+ calibval = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ /* Offset with seconds ticks */
+ offset_val = calibval & RTC_TICK_MASK;
+ offset_val = offset_val - RTC_CALIB_DEF;
+ offset_val = offset_val * tick_mult;
+
+ /* Offset with fractional ticks */
+ if (calibval & RTC_FR_EN)
+ offset_val += ((calibval & RTC_FR_MASK) >> RTC_FR_DATSHIFT)
+ * (tick_mult / RTC_FR_MAX_TICKS);
+ *offset = offset_val;
+
+ return 0;
+}
+
+static int xlnx_rtc_set_offset(struct device *dev, long offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned char fract_tick = 0;
+ unsigned int calibval;
+ short int max_tick;
+ int fract_offset;
+
+ if (offset < RTC_MIN_OFFSET || offset > RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Number ticks for given offset */
+ max_tick = div_s64_rem(offset, tick_mult, &fract_offset);
+
+ /* Number fractional ticks for given offset */
+ if (fract_offset) {
+ if (fract_offset < 0) {
+ fract_offset = fract_offset + tick_mult;
+ max_tick--;
+ }
+ if (fract_offset > (tick_mult / RTC_FR_MAX_TICKS)) {
+ for (fract_tick = 1; fract_tick < 16; fract_tick++) {
+ if (fract_offset <=
+ (fract_tick *
+ (tick_mult / RTC_FR_MAX_TICKS)))
+ break;
+ }
+ }
+ }
+
+	/*
+	 * The ZynqMP RTC uses seconds and fractional tick counters
+	 * for compensation.
+	 */
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+ calibval = max_tick + RTC_CALIB_DEF;
+
+ if (fract_tick)
+ calibval |= RTC_FR_EN;
+
+ calibval |= (fract_tick << RTC_FR_DATSHIFT);
+
+ writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+
+ return 0;
}
static const struct rtc_class_ops xlnx_rtc_ops = {
@@ -190,6 +251,8 @@ static const struct rtc_class_ops xlnx_rtc_ops = {
.read_alarm = xlnx_rtc_read_alarm,
.set_alarm = xlnx_rtc_set_alarm,
.alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+ .read_offset = xlnx_rtc_read_offset,
+ .set_offset = xlnx_rtc_set_offset,
};
static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
@@ -255,10 +318,22 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node, "calibration",
- &xrtcdev->calibval);
- if (ret)
- xrtcdev->calibval = RTC_CALIB_DEF;
+ /* Getting the rtc_clk info */
+ xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
+ if (IS_ERR(xrtcdev->rtc_clk)) {
+ if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "Device clock not found.\n");
+ }
+ xrtcdev->freq = clk_get_rate(xrtcdev->rtc_clk);
+ if (!xrtcdev->freq) {
+ ret = of_property_read_u32(pdev->dev.of_node, "calibration",
+ &xrtcdev->freq);
+ if (ret)
+ xrtcdev->freq = RTC_CALIB_DEF;
+ }
+ ret = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ if (!ret)
+ writel(xrtcdev->freq, (xrtcdev->reg_base + RTC_CALIB_WR));
xlnx_init_rtc(xrtcdev);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 57f41efb8043..7d1749b0d378 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -89,7 +89,7 @@ config HMC_DRV
Management Console (HMC) drive CD/DVD-ROM. It is available as a
module, called 'hmcdrv', and also as kernel built-in. There is one
optional parameter for this module: cachesize=N, which modifies the
- transfer cache size from it's default value 0.5MB to N bytes. If N
+ transfer cache size from its default value 0.5MB to N bytes. If N
is zero, then no caching is performed.
config SCLP_OFB
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 38cc1565d6ae..751945fb6793 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -548,7 +548,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
case 0x2e:
/*
* Not capable. This indicates either that the drive fails
- * reading the format id mark or that that format specified
+ * reading the format id mark or that format specified
* is not supported by the drive.
*/
dev_warn (&device->cdev->dev, "The tape unit cannot process "
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 66505d7166a6..1d40457c7b10 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -27,6 +27,7 @@
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/cpufeature.h>
#include <asm/uvdevice.h>
#include <asm/uv.h>
@@ -244,12 +245,10 @@ static void __exit uvio_dev_exit(void)
static int __init uvio_dev_init(void)
{
- if (!test_facility(158))
- return -ENXIO;
return misc_register(&uvio_dev_miscdev);
}
-module_init(uvio_dev_init);
+module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init);
module_exit(uvio_dev_exit);
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 516783ba950f..f6da215ccf9f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
+#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
@@ -50,36 +51,41 @@ static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
+static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
- * Copy memory from HSA to user memory (not reentrant):
+ * Copy memory from HSA to iterator (not reentrant):
*
- * @dest: User buffer where memory should be copied to
+ * @iter: Iterator where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ size_t bytes, copied, res = 0;
+ unsigned long offset;
if (!hsa_available)
- return -ENODATA;
+ return 0;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
+ break;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
- if (copy_to_user(dest, hsa_buf + offset, bytes))
- return -EFAULT;
- src += bytes;
- dest += bytes;
- count -= bytes;
+ copied = copy_to_iter(hsa_buf + offset, bytes, iter);
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < bytes)
+ break;
}
- return 0;
+ mutex_unlock(&hsa_buf_mutex);
+ return res;
}
/*
@@ -89,25 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ struct iov_iter iter;
+ struct kvec kvec;
- if (!hsa_available)
- return -ENODATA;
-
- while (count) {
- if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
- TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
- }
- offset = src % PAGE_SIZE;
- bytes = min(PAGE_SIZE - offset, count);
- memcpy(dest, hsa_buf + offset, bytes);
- src += bytes;
- dest += bytes;
- count -= bytes;
- }
+ kvec.iov_base = dst;
+ kvec.iov_len = count;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ if (memcpy_hsa_iter(&iter, src, count) < count)
+ return -EIO;
return 0;
}
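memcpy_hsa_kernel() above doubles as the kernel-buffer usage example for the
new iterator interface. For completeness, a sketch (illustration only;
hsa_read_user() is a hypothetical helper, not part of this patch) of how the
old copy_to_user() path maps onto the same helper by wrapping the user
buffer in an iov_iter; the direction argument mirrors the kvec example
above:

	static ssize_t hsa_read_user(void __user *buf, unsigned long src,
				     size_t count)
	{
		struct iov_iter iter;
		struct iovec iov = {
			.iov_base = buf,
			.iov_len  = count,
		};

		iov_iter_init(&iter, WRITE, &iov, 1, count);
		return memcpy_hsa_iter(&iter, src, count);
	}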
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 7a838e3d7c0f..420d89ba7f83 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -8,7 +8,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0c2be9421ab7..7b02e97f4b29 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>
@@ -18,13 +19,11 @@
#include "vfio_ccw_cp.h"
#include "vfio_ccw_private.h"
-struct pfn_array {
- /* Starting guest physical I/O address. */
- unsigned long pa_iova;
- /* Array that stores PFNs of the pages need to pin. */
- unsigned long *pa_iova_pfn;
- /* Array that receives PFNs of the pages pinned. */
- unsigned long *pa_pfn;
+struct page_array {
+ /* Array that stores pages need to pin. */
+	/* Array that stores the iova of each page to pin. */
+ /* Array that receives the pinned pages. */
+ struct page **pa_page;
/* Number of pages pinned from @pa_iova. */
int pa_nr;
};
@@ -37,116 +36,158 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array *ch_pa;
+ struct page_array *ch_pa;
};
/*
- * pfn_array_alloc() - alloc memory for PFNs
- * @pa: pfn_array on which to perform the operation
+ * page_array_alloc() - alloc memory for page array
+ * @pa: page_array on which to perform the operation
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs.
+ * Attempt to allocate memory for page array.
*
- * Usage of pfn_array:
- * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * Usage of page_array:
+ * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
* this structure will be filled in by this function.
*
* Returns:
- * 0 if PFNs are allocated
- * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * 0 if page array is allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
{
int i;
- if (pa->pa_nr || pa->pa_iova_pfn)
+ if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_iova = iova;
-
pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (!pa->pa_nr)
return -EINVAL;
- pa->pa_iova_pfn = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova_pfn) +
- sizeof(*pa->pa_pfn),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_iova = kcalloc(pa->pa_nr,
+ sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
+ GFP_KERNEL);
+ if (unlikely(!pa->pa_iova)) {
pa->pa_nr = 0;
return -ENOMEM;
}
- pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+ pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- pa->pa_pfn[0] = -1ULL;
+ pa->pa_iova[0] = iova;
+ pa->pa_page[0] = NULL;
for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
- pa->pa_pfn[i] = -1ULL;
+ pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
+ pa->pa_page[i] = NULL;
}
return 0;
}
/*
- * pfn_array_pin() - Pin user pages in memory
- * @pa: pfn_array on which to perform the operation
+ * page_array_unpin() - Unpin user pages in memory
+ * @pa: page_array on which to perform the operation
+ * @vdev: the vfio device to perform the operation
+ * @pa_nr: number of user pages to unpin
+ *
+ * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
+ * otherwise only clear pa->pa_nr
+ */
+static void page_array_unpin(struct page_array *pa,
+ struct vfio_device *vdev, int pa_nr)
+{
+ int unpinned = 0, npage = 1;
+
+ while (unpinned < pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[unpinned];
+ dma_addr_t *last = &first[npage];
+
+ if (unpinned + npage < pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ vfio_unpin_pages(vdev, *first, npage);
+ unpinned += npage;
+ npage = 1;
+ }
+
+ pa->pa_nr = 0;
+}
+
+/*
+ * page_array_pin() - Pin user pages in memory
+ * @pa: page_array on which to perform the operation
 * @vdev: the vfio device to perform pin operations
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
{
+ int pinned = 0, npage = 1;
int ret = 0;
- ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
- IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+ while (pinned < pa->pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[pinned];
+ dma_addr_t *last = &first[npage];
- if (ret < 0) {
- goto err_out;
- } else if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
- ret = -EINVAL;
- goto err_out;
+ if (pinned + npage < pa->pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ ret = vfio_pin_pages(vdev, *first, npage,
+ IOMMU_READ | IOMMU_WRITE,
+ &pa->pa_page[pinned]);
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != npage) {
+ pinned += ret;
+ ret = -EINVAL;
+ goto err_out;
+ }
+ pinned += npage;
+ npage = 1;
}
return ret;
err_out:
- pa->pa_nr = 0;
-
+ page_array_unpin(pa, vdev, pinned);
return ret;
}
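Both page_array_pin() and page_array_unpin() batch the vfio calls over
maximal runs of PAGE_SIZE-contiguous iovas. The run detection in isolation,
as a standalone sketch (illustration only, not part of the patch):

	/*
	 * E.g. { 0x1000, 0x2000, 0x3000, 0x8000 } yields one run of three
	 * pages starting at 0x1000 and one single-page run at 0x8000.
	 */
	static void for_each_contiguous_run(dma_addr_t *iova, int nr,
					    void (*fn)(dma_addr_t first, int npage))
	{
		int done = 0, npage = 1;

		while (done < nr) {
			if (done + npage < nr &&
			    iova[done] + npage * PAGE_SIZE == iova[done + npage]) {
				npage++;
				continue;
			}
			fn(iova[done], npage);	/* one batched vfio call per run */
			done += npage;
			npage = 1;
		}
	}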
/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
{
- /* Only unpin if any pages were pinned to begin with */
- if (pa->pa_nr)
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
- pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
+ page_array_unpin(pa, vdev, pa->pa_nr);
+ kfree(pa->pa_iova);
}
-static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
{
- unsigned long iova_pfn = iova >> PAGE_SHIFT;
+ u64 iova_pfn_start = iova >> PAGE_SHIFT;
+ u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
+ u64 pfn;
int i;
- for (i = 0; i < pa->pa_nr; i++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ for (i = 0; i < pa->pa_nr; i++) {
+ pfn = pa->pa_iova[i] >> PAGE_SHIFT;
+ if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
return true;
+ }
return false;
}
-/* Create the list of IDAL words for a pfn_array. */
-static inline void pfn_array_idal_create_words(
- struct pfn_array *pa,
- unsigned long *idaws)
+/* Create the list of IDAL words for a page_array. */
+static inline void page_array_idal_create_words(struct page_array *pa,
+ unsigned long *idaws)
{
int i;
@@ -159,10 +200,10 @@ static inline void pfn_array_idal_create_words(
*/
for (i = 0; i < pa->pa_nr; i++)
- idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+ idaws[i] = page_to_phys(pa->pa_page[i]);
/* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+ idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -194,24 +235,24 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
unsigned long n)
{
- struct pfn_array pa = {0};
- u64 from;
+ struct page_array pa = {0};
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc(&pa, iova, n);
+ ret = page_array_alloc(&pa, iova, n);
if (ret < 0)
return ret;
- ret = pfn_array_pin(&pa, vdev);
+ ret = page_array_pin(&pa, vdev);
if (ret < 0) {
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return ret;
}
l = n;
for (i = 0; i < pa.pa_nr; i++) {
- from = pa.pa_pfn[i] << PAGE_SHIFT;
+ void *from = kmap_local_page(pa.pa_page[i]);
+
m = PAGE_SIZE;
if (i == 0) {
from += iova & (PAGE_SIZE - 1);
@@ -219,14 +260,15 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
}
m = min(l, m);
- memcpy(to + (n - l), (void *)from, m);
+ memcpy(to + (n - l), from, m);
+ kunmap_local(from);
l -= m;
if (l == 0)
break;
}
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return l;
}
@@ -329,7 +371,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct pfn_array *)data;
+ chain->ch_pa = (struct page_array *)data;
chain->ch_len = len;
@@ -513,7 +555,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccw1 *ccw;
- struct pfn_array *pa;
+ struct page_array *pa;
u64 iova;
unsigned long *idaws;
int ret;
@@ -547,13 +589,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
}
/*
- * Allocate an array of pfn's for pages to pin/translate.
+ * Allocate an array of pages to pin/translate.
* The number of pages is actually the count of the idaws
	 * required for the data transfer, since we only support
* 4K IDAWs today.
*/
pa = chain->ch_pa + idx;
- ret = pfn_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, iova, bytes);
if (ret < 0)
goto out_free_idaws;
@@ -564,21 +606,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
goto out_unpin;
/*
- * Copy guest IDAWs into pfn_array, in case the memory they
+ * Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
for (i = 0; i < idaw_nr; i++)
- pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ pa->pa_iova[i] = idaws[i];
} else {
/*
- * No action is required here; the iova addresses in pfn_array
- * were initialized sequentially in pfn_array_alloc() beginning
+ * No action is required here; the iova addresses in page_array
+ * were initialized sequentially in page_array_alloc() beginning
* with the contents of ccw->cda.
*/
}
if (ccw_does_data_transfer(ccw)) {
- ret = pfn_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev);
if (ret < 0)
goto out_unpin;
} else {
@@ -588,13 +630,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
- /* Populate the IDAL with pinned/translated addresses from pfn */
- pfn_array_idal_create_words(pa, idaws);
+ /* Populate the IDAL with pinned/translated addresses from page */
+ page_array_idal_create_words(pa, idaws);
return 0;
out_unpin:
- pfn_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev);
out_free_idaws:
kfree(idaws);
out_init:
@@ -700,7 +742,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- pfn_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(chain->ch_pa + i, vdev);
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -862,11 +904,12 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
* @cp: channel_program on which to perform the operation
* @iova: the iova to check
+ * @length: the length to check from @iova
*
* If the @iova is currently pinned for the ccw chain, return true;
* else return false.
*/
-bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
{
struct ccwchain *chain;
int i;
@@ -876,7 +919,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+ if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
return true;
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index e4c436199b4c..54d26e242533 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -41,11 +41,11 @@ struct channel_program {
struct ccw1 *guest_cp;
};
-extern int cp_init(struct channel_program *cp, union orb *orb);
-extern void cp_free(struct channel_program *cp);
-extern int cp_prefetch(struct channel_program *cp);
-extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
-extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
-extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+int cp_init(struct channel_program *cp, union orb *orb);
+void cp_free(struct channel_program *cp);
+int cp_prefetch(struct channel_program *cp);
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ee182cfb467d..86d9e428357b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
DECLARE_COMPLETION_ONSTACK(completion);
int iretry, ret = 0;
- spin_lock_irq(sch->lock);
- if (!sch->schib.pmcw.ena)
- goto out_unlock;
- ret = cio_disable_subchannel(sch);
- if (ret != -EBUSY)
- goto out_unlock;
-
iretry = 255;
do {
@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-out_unlock:
- private->state = VFIO_CCW_STATE_NOT_OPER;
- spin_unlock_irq(sch->lock);
+
return ret;
}
@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
/*
* Reset to IDLE only if processing of a channel program
* has finished. Do not overwrite a possible processing
- * state if the final interrupt was for HSCH or CSCH.
+ * state if the interrupt was unsolicited, or if the final
+ * interrupt was for HSCH or CSCH.
*/
- if (private->mdev && cp_is_finished)
+ if (cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
private->sch = sch;
mutex_init(&private->io_mutex);
- private->state = VFIO_CCW_STATE_NOT_OPER;
+ private->state = VFIO_CCW_STATE_STANDBY;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- spin_lock_irq(sch->lock);
- sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- spin_unlock_irq(sch->lock);
+ ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
if (ret)
goto out_free;
- private->state = VFIO_CCW_STATE_STANDBY;
-
- ret = vfio_ccw_mdev_reg(sch);
- if (ret)
- goto out_disable;
-
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
return 0;
-out_disable:
- cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_private(private);
@@ -261,8 +241,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- vfio_ccw_sch_quiesce(sch);
- vfio_ccw_mdev_unreg(sch);
+ mdev_unregister_device(&sch->dev);
dev_set_drvdata(&sch->dev, NULL);
@@ -275,7 +254,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
- vfio_ccw_sch_quiesce(sch);
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/**
@@ -301,19 +283,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
@@ -358,8 +332,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
return 0;
trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
- VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
- mdev_uuid(private->mdev), sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
+ sch->schid.cssid,
sch->schid.ssid, sch->schid.sch_no,
mask, event);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 8483a266051c..a59c758869f8 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -10,7 +10,8 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
+
+#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
- VFIO_CCW_TRACE_EVENT(2, "notoper");
- VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
+ sch->schid.cssid,
+ sch->schid.ssid,
+ sch->schid.sch_no,
+ event,
+ private->state);
/*
* TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
+
+ /* This is usually handled during CLOSE event */
+ cp_free(&private->cp);
}
/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
- struct mdev_device *mdev = private->mdev;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): transport mode\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: transport mode\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_init=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_init=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_prefetch=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: fsm_io_helper=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): halt on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: halt on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): clear on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: clear on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
complete(private->completion);
}
+static void fsm_open(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_IDLE;
+ spin_unlock_irq(sch->lock);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
+static void fsm_close(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+
+ if (!sch->schib.pmcw.ena)
+ goto err_unlock;
+
+ ret = cio_disable_subchannel(sch);
+ if (ret == -EBUSY)
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_STANDBY;
+ spin_unlock_irq(sch->lock);
+ cp_free(&private->cp);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
/*
* Device statemachine
*/
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_nop,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_open,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
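
The new OPEN and CLOSE rows make the lifecycle explicit: only STANDBY accepts OPEN (fsm_open enables the subchannel and moves to IDLE), and any operational state accepts CLOSE (fsm_close quiesces and returns to STANDBY). A sketch of how callers drive the table through vfio_ccw_fsm_event():

	/* Sketch: lifecycle events dispatched through the jumptable. */
	static void example_lifecycle(struct vfio_ccw_private *private)
	{
		/* STANDBY + OPEN -> fsm_open(): enable subchannel, state = IDLE */
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);

		/* IDLE + CLOSE -> fsm_close(): disable subchannel, state = STANDBY */
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	}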
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index b49e2e9db2dc..4a806a2273b5 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -21,54 +21,28 @@ static const struct vfio_device_ops vfio_ccw_dev_ops;
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
- struct subchannel *sch;
- int ret;
-
- sch = private->sch;
/*
- * TODO:
- * In the cureent stage, some things like "no I/O running" and "no
- * interrupt pending" are clear, but we are not sure what other state
- * we need to care about.
- * There are still a lot more instructions need to be handled. We
- * should come back here later.
+ * If the FSM state is seen as Not Operational after closing
+ * and re-opening the mdev, return an error.
*/
- ret = vfio_ccw_sch_quiesce(sch);
- if (ret)
- return ret;
-
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- if (!ret)
- private->state = VFIO_CCW_STATE_IDLE;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
- return ret;
+ return 0;
}
-static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
struct vfio_ccw_private *private =
- container_of(nb, struct vfio_ccw_private, nb);
-
- /*
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
-
- if (!cp_iova_pinned(&private->cp, unmap->iova))
- return NOTIFY_OK;
+ container_of(vdev, struct vfio_ccw_private, vdev);
- if (vfio_ccw_mdev_reset(private))
- return NOTIFY_BAD;
+ /* Drivers MUST unpin pages in response to an invalidation. */
+ if (!cp_iova_pinned(&private->cp, iova, length))
+ return;
- cp_free(&private->cp);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
+ vfio_ccw_mdev_reset(private);
}
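
The replacement for the notifier is the vfio core's .dma_unmap callback, which the core invokes with the invalidated [iova, iova + length) range. The general shape of such a handler, with hypothetical names (my_dma_unmap/my_dev_ops) standing in for a driver's own:

	/* Sketch: range-invalidation handler wired into vfio_device_ops. */
	static void my_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
	{
		/* Unpin or quiesce anything overlapping [iova, iova + length). */
	}

	static const struct vfio_device_ops my_dev_ops = {
		.dma_unmap = my_dma_unmap,
		/* .open_device, .close_device, ... */
	};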
static ssize_t name_show(struct mdev_type *mtype,
@@ -128,11 +102,8 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
vfio_init_group_dev(&private->vdev, &mdev->dev,
&vfio_ccw_dev_ops);
- private->mdev = mdev;
- private->state = VFIO_CCW_STATE_IDLE;
-
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
@@ -145,8 +116,6 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
err_atomic:
vfio_uninit_group_dev(&private->vdev);
atomic_inc(&private->avail);
- private->mdev = NULL;
- private->state = VFIO_CCW_STATE_IDLE;
return ret;
}
@@ -154,23 +123,14 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
vfio_unregister_group_dev(&private->vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_sch_quiesce(private->sch))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
vfio_uninit_group_dev(&private->vdev);
- cp_free(&private->cp);
- private->mdev = NULL;
atomic_inc(&private->avail);
}
@@ -178,19 +138,15 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
- private->nb.notifier_call = vfio_ccw_mdev_notifier;
-
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
- if (ret)
- return ret;
+ /* Device cannot simply be opened again from this state */
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- goto out_unregister;
+ return ret;
ret = vfio_ccw_register_schib_dev_regions(private);
if (ret)
@@ -200,11 +156,16 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
if (ret)
goto out_unregister;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ ret = -EINVAL;
+ goto out_unregister;
+ }
+
return ret;
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
return ret;
}
@@ -213,16 +174,8 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(private))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
- cp_free(&private->cp);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -645,6 +598,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
.request = vfio_ccw_mdev_request,
+ .dma_unmap = vfio_ccw_dma_unmap,
};
struct mdev_driver vfio_ccw_mdev_driver = {
@@ -657,13 +611,3 @@ struct mdev_driver vfio_ccw_mdev_driver = {
.remove = vfio_ccw_mdev_remove,
.supported_type_groups = mdev_type_groups,
};
-
-int vfio_ccw_mdev_reg(struct subchannel *sch)
-{
- return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
-}
-
-void vfio_ccw_mdev_unreg(struct subchannel *sch)
-{
- mdev_unregister_device(&sch->dev);
-}
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 7272eb788612..cd24b7fada91 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -73,8 +73,6 @@ struct vfio_ccw_crw {
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @avail: available for creating a mediated device
- * @mdev: pointer to the mediated device
- * @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -97,8 +95,6 @@ struct vfio_ccw_private {
int state;
struct completion *completion;
atomic_t avail;
- struct mdev_device *mdev;
- struct notifier_block nb;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -119,10 +115,7 @@ struct vfio_ccw_private {
struct work_struct crw_work;
} __aligned(8);
-extern int vfio_ccw_mdev_reg(struct subchannel *sch);
-extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
-
-extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+int vfio_ccw_sch_quiesce(struct subchannel *sch);
extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -147,6 +140,8 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
VFIO_CCW_EVENT_ASYNC_REQ,
+ VFIO_CCW_EVENT_OPEN,
+ VFIO_CCW_EVENT_CLOSE,
/* last element! */
NR_VFIO_CCW_EVENTS
};
@@ -158,7 +153,7 @@ typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
- int event)
+ enum vfio_ccw_event event)
{
trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 95cfdeab5baf..59ac98f2bd27 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -838,6 +838,17 @@ static void ap_bus_revise_bindings(void)
bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
+/**
+ * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
+ * default host driver or not.
+ * @card: the APID of the adapter card to check
+ * @queue: the APQI of the queue to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether the AP adapter is reserved for the host (1)
+ * or not (0).
+ */
int ap_owned_by_def_drv(int card, int queue)
{
int rc = 0;
@@ -845,25 +856,31 @@ int ap_owned_by_def_drv(int card, int queue)
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
- mutex_lock(&ap_perms_mutex);
-
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
+/**
+ * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
+ * a set is reserved for the host drivers
+ * or not.
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether each APQN is reserved for the host (1) or
+ * not (0)
+ */
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm)
{
int card, queue, rc = 0;
- mutex_lock(&ap_perms_mutex);
-
for (card = 0; !rc && card < AP_DEVICES; card++)
if (test_bit_inv(card, apm) &&
test_bit_inv(card, ap_perms.apm))
@@ -872,8 +889,6 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
@@ -2071,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
+ if (!ap_qci_info) /* QCI not supported */
+ return false;
+
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0c40af157df2..0f17933954fb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -148,12 +148,16 @@ struct ap_driver {
/*
* Called at the start of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_config_changed)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
/*
* Called at the end of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_scan_complete)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index c48b0db824e3..a32457b4cbb8 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -34,7 +34,7 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
- status = ap_aqic(aq->qid, qirqctrl, ind);
+ status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
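
The ap_aqic() interface now takes the notification-indicator address as a physical address, so callers convert with virt_to_phys() for kernel memory (or page_to_phys() for a pinned guest page, as in vfio_ap_irq_enable() below). A sketch of the converted call, assuming the same qirqctrl setup shown above:

	/* Sketch: enabling interrupts with a physical NIB address. */
	struct ap_queue_status enable_irq_example(struct ap_queue *aq, void *ind)
	{
		struct ap_qirq_ctrl qirqctrl = { .ir = 1, .isc = AP_ISC };

		/* The instruction wants a physical address, not a pointer. */
		return ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	}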
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7329caa7d467..5a05d1cdfec2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2115,5 +2115,5 @@ static void __exit pkey_exit(void)
pkey_debug_exit();
}
-module_cpu_feature_match(MSA, pkey_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 4ac9c6521ec1..f43cfeabd2cc 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -18,9 +18,6 @@
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_NAME "matrix"
-#define AP_QUEUE_ASSIGNED "assigned"
-#define AP_QUEUE_UNASSIGNED "unassigned"
-#define AP_QUEUE_IN_USE "in use"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
@@ -46,120 +43,12 @@ static struct ap_device_id ap_queue_ids[] = {
{ /* end of sibling */ },
};
-static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
-{
- struct ap_matrix_mdev *matrix_mdev;
- unsigned long apid = AP_QID_CARD(q->apqn);
- unsigned long apqi = AP_QID_QUEUE(q->apqn);
-
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
- if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
- test_bit_inv(apqi, matrix_mdev->matrix.aqm))
- return matrix_mdev;
- }
-
- return NULL;
-}
-
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t nchars = 0;
- struct vfio_ap_queue *q;
- struct ap_matrix_mdev *matrix_mdev;
- struct ap_device *apdev = to_ap_dev(dev);
-
- mutex_lock(&matrix_dev->lock);
- q = dev_get_drvdata(&apdev->device);
- matrix_mdev = vfio_ap_mdev_for_queue(q);
-
- if (matrix_mdev) {
- if (matrix_mdev->kvm)
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_IN_USE);
- else
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_ASSIGNED);
- } else {
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_UNASSIGNED);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return nchars;
-}
-
-static DEVICE_ATTR_RO(status);
-
-static struct attribute *vfio_queue_attrs[] = {
- &dev_attr_status.attr,
- NULL,
-};
-
-static const struct attribute_group vfio_queue_attr_group = {
- .attrs = vfio_queue_attrs,
-};
-
-/**
- * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
- * with the device as driver_data.
- *
- * @apdev: the AP device being probed
- *
- * Return: returns 0 if the probe succeeded; otherwise, returns an error if
- * storage could not be allocated for a vfio_ap_queue object or the
- * sysfs 'status' attribute could not be created for the queue device.
- */
-static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
-{
- int ret;
- struct vfio_ap_queue *q;
-
- q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
- mutex_lock(&matrix_dev->lock);
- dev_set_drvdata(&apdev->device, q);
- q->apqn = to_ap_queue(&apdev->device)->qid;
- q->saved_isc = VFIO_AP_ISC_INVALID;
-
- ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
- if (ret) {
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return ret;
-}
-
-/**
- * vfio_ap_queue_dev_remove: Free the associated vfio_ap_queue structure.
- *
- * @apdev: the AP device being removed
- *
- * Takes the matrix lock to avoid actions on this device while doing the remove.
- */
-static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
-{
- struct vfio_ap_queue *q;
-
- mutex_lock(&matrix_dev->lock);
- sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
- q = dev_get_drvdata(&apdev->device);
- vfio_ap_mdev_reset_queue(q, 1);
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- mutex_unlock(&matrix_dev->lock);
-}
-
static struct ap_driver vfio_ap_drv = {
- .probe = vfio_ap_queue_dev_probe,
- .remove = vfio_ap_queue_dev_remove,
+ .probe = vfio_ap_mdev_probe_queue,
+ .remove = vfio_ap_mdev_remove_queue,
+ .in_use = vfio_ap_mdev_resource_in_use,
+ .on_config_changed = vfio_ap_on_cfg_changed,
+ .on_scan_complete = vfio_ap_on_scan_complete,
.ids = ap_queue_ids,
};
@@ -212,8 +101,9 @@ static int vfio_ap_matrix_dev_create(void)
goto matrix_alloc_err;
}
- mutex_init(&matrix_dev->lock);
+ mutex_init(&matrix_dev->mdevs_lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
+ mutex_init(&matrix_dev->guests_lock);
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a7d2a95796d3..6c8c41fac4e1 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -26,44 +26,193 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
+
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);
+
+/**
+ * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be taken.
+ */
+static inline void get_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (kvm)
+ mutex_lock(&kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_kvm: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be released.
+ */
+static inline void release_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (kvm)
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+/**
+ * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be taken.
+ */
+static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_mdev: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. matrix_mdev->kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be released.
+ */
+static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
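
All of these helpers encode one hierarchy (guests_lock, then kvm->lock, then mdevs_lock) acquired and released in mirror order. A usage sketch; the body comment stands in for any APCB update done under the locks:

	/* Sketch: every dynamic APCB update nests inside the same ordering. */
	static void example_update(struct ap_matrix_mdev *matrix_mdev)
	{
		get_update_locks_for_mdev(matrix_mdev);	/* guests -> kvm -> mdevs */

		/* ... mutate matrix_mdev->shadow_apcb, push it to the guest ... */

		release_update_locks_for_mdev(matrix_mdev); /* mdevs -> kvm -> guests */
	}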
-static int match_apqn(struct device *dev, const void *data)
+/**
+ * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
+ * acquire the locks required to update the APCB of
+ * the KVM guest to which the mdev is attached.
+ *
+ * @apqn: the APQN of a queue device.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
+ * will not be taken.
+ *
+ * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
+ * is not assigned to an ap_matrix_mdev.
+ */
+static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
- struct vfio_ap_queue *q = dev_get_drvdata(dev);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
+ test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
+ if (matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return matrix_mdev;
+ }
+ }
- return (q->apqn == *(int *)(data)) ? 1 : 0;
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return NULL;
}
/**
- * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
- * @matrix_mdev: the associated mediated matrix
- * @apqn: The queue APQN
+ * get_update_locks_for_queue: get the locks required to update the APCB of the
+ * KVM guest to which the matrix mdev linked to a
+ * vfio_ap_queue object is attached.
+ *
+ * @q: a pointer to a vfio_ap_queue object.
*
- * Retrieve a queue with a specific APQN from the list of the
- * devices of the vfio_ap_drv.
- * Verify that the APID and the APQI are set in the matrix.
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a
+ * KVM guest's APCB.
+ * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev
+ *
+ * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock
+ * will not be taken.
+ */
+static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (q->matrix_mdev && q->matrix_mdev->kvm)
+ mutex_lock(&q->matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
+ * hash table of queues assigned to a matrix mdev
+ * @matrix_mdev: the matrix mdev
+ * @apqn: The APQN of a queue device
*
- * Return: the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the vfio_ap_queue struct representing the queue or
+ * NULL if the queue is not assigned to @matrix_mdev
*/
-static struct vfio_ap_queue *vfio_ap_get_queue(
+static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
struct ap_matrix_mdev *matrix_mdev,
int apqn)
{
struct vfio_ap_queue *q;
- if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
- return NULL;
- if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
- return NULL;
-
- q = vfio_ap_find_queue(apqn);
- if (q)
- q->matrix_mdev = matrix_mdev;
+ hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
+ apqn) {
+ if (q && q->apqn == apqn)
+ return q;
+ }
- return q;
+ return NULL;
}
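
hash_for_each_possible() only narrows the search to one bucket, so the explicit q->apqn == apqn comparison is what filters out hash collisions. The pattern in isolation (a sketch using the same <linux/hashtable.h> API, with a generic function name):

	/* Sketch: bucket scan; the key test is mandatory because distinct
	 * APQNs can hash to the same bucket. */
	static struct vfio_ap_queue *lookup_example(struct ap_matrix_mdev *matrix_mdev,
						    int apqn)
	{
		struct vfio_ap_queue *q;

		hash_for_each_possible(matrix_mdev->qtable.queues, q,
				       mdev_qnode, apqn)
			if (q->apqn == apqn)
				return q;

		return NULL;
	}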
/**
@@ -112,7 +261,7 @@ static void vfio_ap_wait_for_irqclear(int apqn)
*
* Unregisters the ISC in the GIB when the saved ISC not invalid.
* Unpins the guest's page holding the NIB when it exists.
- * Resets the saved_pfn and saved_isc to invalid values.
+ * Resets the saved_iova and saved_isc to invalid values.
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
@@ -123,9 +272,9 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
q->saved_isc = VFIO_AP_ISC_INVALID;
}
- if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
- vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1);
- q->saved_pfn = 0;
+ if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
+ q->saved_iova = 0;
}
}
@@ -154,7 +303,7 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
int retries = 5;
do {
- status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ status = ap_aqic(q->apqn, aqic_gisa, 0);
switch (status.response_code) {
case AP_RESPONSE_OTHERWISE_CHANGED:
case AP_RESPONSE_NORMAL:
@@ -180,7 +329,6 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
status.response_code);
end_free:
vfio_ap_free_aqic_resources(q);
- q->matrix_mdev = NULL;
return status;
}
@@ -189,27 +337,19 @@ end_free:
*
* @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
* @nib: the location for storing the nib address.
- * @g_pfn: the location for storing the page frame number of the page containing
- * the nib.
*
* When the PQAP(AQIC) instruction is executed, general register 2 contains the
* address of the notification indicator byte (nib) used for IRQ notification.
- * This function parses the nib from gr2 and calculates the page frame
- * number for the guest of the page containing the nib. The values are
- * stored in @nib and @g_pfn respectively.
- *
- * The g_pfn of the nib is then validated to ensure the nib address is valid.
+ * This function parses and validates the nib from gr2.
*
* Return: returns zero if the nib address is valid; otherwise, returns
* -EINVAL.
*/
-static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, unsigned long *nib,
- unsigned long *g_pfn)
+static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
*nib = vcpu->run->s.regs.gprs[2];
- *g_pfn = *nib >> PAGE_SHIFT;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *g_pfn)))
+ if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
return -EINVAL;
return 0;
@@ -239,33 +379,34 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
struct kvm_vcpu *vcpu)
{
- unsigned long nib;
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status = {};
struct kvm_s390_gisa *gisa;
+ struct page *h_page;
int nisc;
struct kvm *kvm;
- unsigned long h_nib, g_pfn, h_pfn;
+ phys_addr_t h_nib;
+ dma_addr_t nib;
int ret;
/* Verify that the notification indicator byte address is valid */
- if (vfio_ap_validate_nib(vcpu, &nib, &g_pfn)) {
- VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, nib, g_pfn, q->apqn);
+ if (vfio_ap_validate_nib(vcpu, &nib)) {
+ VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
+ __func__, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
- ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1,
- IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_page);
switch (ret) {
case 1:
break;
default:
VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
- "nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, ret, nib, g_pfn, q->apqn);
+ "nib=%pad, apqn=%#04x\n",
+ __func__, ret, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
@@ -274,7 +415,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
kvm = q->matrix_mdev->kvm;
gisa = kvm->arch.gisa_int.origin;
- h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
aqic_gisa.gisc = isc;
nisc = kvm_s390_gisc_register(kvm, isc);
@@ -290,17 +431,17 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
aqic_gisa.ir = 1;
aqic_gisa.gisa = (uint64_t)gisa >> 4;
- status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ status = ap_aqic(q->apqn, aqic_gisa, h_nib);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
/* See if we did clear older IRQ configuration */
vfio_ap_free_aqic_resources(q);
- q->saved_pfn = g_pfn;
+ q->saved_iova = nib;
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ settings: clear new configuration */
- vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
default:
@@ -406,10 +547,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
if (!vcpu->kvm->arch.crypto.pqap_hook) {
VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
__func__, apqn);
+
goto out_unlock;
}
@@ -425,7 +568,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
goto out_unlock;
}
- q = vfio_ap_get_queue(matrix_mdev, apqn);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
if (!q) {
VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
__func__, AP_QID_CARD(apqn),
@@ -444,7 +587,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
out_unlock:
memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
vcpu->run->s.regs.gprs[1] >>= 32;
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
}
@@ -456,6 +599,91 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->Nd : 15;
}
+static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm,
+ matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.adm);
+}
+
+static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+{
+ DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);
+
+ bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
+ bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
+ (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);
+
+ return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
+ AP_DOMAINS);
+}
+
+/*
+ * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
+ *				to ensure that no queue device not bound to
+ *				the vfio_ap device driver is passed through
+ *				to the guest.
+ *
+ * @apm: mask flagging the APIDs of the queues to examine for filtering
+ * @aqm: mask flagging the APQIs of the queues to examine for filtering
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
+ *
+ * Note: If an APQN references a queue device that is not bound to the vfio_ap
+ *	 driver, its APID will be filtered from the guest's APCB. The matrix
+ *	 structure precludes filtering an individual APQN, so the entire APID
+ *	 is filtered.
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ struct ap_matrix_mdev *matrix_mdev)
+{
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+ DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
+ struct vfio_ap_queue *q;
+
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+ * from the matrix mdev, but only those that are assigned to the host's
+ * AP configuration.
+ */
+ bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
+ (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+ * AP configuration. The AP architecture won't
+ * allow filtering of a single APQN, so let's filter
+ * the APID since an adapter represents a physical
+ * hardware device.
+ */
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_rc) {
+ clear_bit_inv(apid,
+ matrix_mdev->shadow_apcb.apm);
+ break;
+ }
+ }
+ }
+
+ return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
+ AP_DEVICES) ||
+ !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
+ AP_DOMAINS);
+}
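
The filtering itself is bitmap algebra: intersect the mdev's assignment with the host configuration, then detect whether the shadow changed. The copy/and/compare idiom used above, shown in isolation (a sketch, not the patch body):

	/* Sketch: recompute a masked copy and report whether it changed. */
	static bool filter_example(unsigned long *shadow, unsigned long *assigned,
				   unsigned long *host)
	{
		DECLARE_BITMAP(prev, AP_DEVICES);

		bitmap_copy(prev, shadow, AP_DEVICES);
		bitmap_and(shadow, assigned, host, AP_DEVICES);

		return !bitmap_equal(prev, shadow, AP_DEVICES);
	}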
+
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev;
@@ -475,20 +703,19 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
- mutex_lock(&matrix_dev->lock);
- list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
- mutex_unlock(&matrix_dev->lock);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+ hash_init(matrix_mdev->qtable.queues);
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
goto err_list;
dev_set_drvdata(&mdev->dev, matrix_mdev);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
err_list:
- mutex_lock(&matrix_dev->lock);
- list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
err_dec_available:
@@ -496,16 +723,62 @@ err_dec_available:
return ret;
}
+static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
+ struct vfio_ap_queue *q)
+{
+ if (q) {
+ q->matrix_mdev = matrix_mdev;
+ hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
+ }
+}
+
+static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
+{
+ struct vfio_ap_queue *q;
+
+ q = vfio_ap_find_queue(apqn);
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+}
+
+static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
+{
+ hash_del(&q->mdev_qnode);
+}
+
+static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
+{
+ q->matrix_mdev = NULL;
+}
+
+static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct vfio_ap_queue *q;
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ AP_DOMAINS) {
+ q = vfio_ap_mdev_get_queue(matrix_mdev,
+ AP_MKQID(apid, apqi));
+ if (q)
+ q->matrix_mdev = NULL;
+ }
+ }
+}
+
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&matrix_mdev->vdev);
- mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
atomic_inc(&matrix_dev->available_instances);
@@ -554,141 +827,48 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = {
NULL,
};
-struct vfio_ap_queue_reserved {
- unsigned long *apid;
- unsigned long *apqi;
- bool reserved;
-};
-
-/**
- * vfio_ap_has_queue - determines if the AP queue containing the target in @data
- *
- * @dev: an AP queue device
- * @data: a struct vfio_ap_queue_reserved reference
- *
- * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
- * apid or apqi specified in @data:
- *
- * - If @data contains both an apid and apqi value, then @data will be flagged
- * as reserved if the APID and APQI fields for the AP queue device matches
- *
- * - If @data contains only an apid value, @data will be flagged as
- * reserved if the APID field in the AP queue device matches
- *
- * - If @data contains only an apqi value, @data will be flagged as
- * reserved if the APQI field in the AP queue device matches
- *
- * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if
- * @data does not contain either an apid or apqi.
- */
-static int vfio_ap_has_queue(struct device *dev, void *data)
-{
- struct vfio_ap_queue_reserved *qres = data;
- struct ap_queue *ap_queue = to_ap_queue(dev);
- ap_qid_t qid;
- unsigned long id;
-
- if (qres->apid && qres->apqi) {
- qid = AP_MKQID(*qres->apid, *qres->apqi);
- if (qid == ap_queue->qid)
- qres->reserved = true;
- } else if (qres->apid && !qres->apqi) {
- id = AP_QID_CARD(ap_queue->qid);
- if (id == *qres->apid)
- qres->reserved = true;
- } else if (!qres->apid && qres->apqi) {
- id = AP_QID_QUEUE(ap_queue->qid);
- if (id == *qres->apqi)
- qres->reserved = true;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
- * @apid or @aqpi is reserved
- *
- * @apid: an AP adapter ID
- * @apqi: an AP queue index
- *
- * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
- * driver according to the following rules:
- *
- * - If both @apid and @apqi are not NULL, then there must be an AP queue
- * device bound to the vfio_ap driver with the APQN identified by @apid and
- * @apqi
- *
- * - If only @apid is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apid
- *
- * - If only @apqi is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apqi
- *
- * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
- */
-static int vfio_ap_verify_queue_reserved(unsigned long *apid,
- unsigned long *apqi)
-{
- int ret;
- struct vfio_ap_queue_reserved qres;
-
- qres.apid = apid;
- qres.apqi = apqi;
- qres.reserved = false;
-
- ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &qres, vfio_ap_has_queue);
- if (ret)
- return ret;
-
- if (qres.reserved)
- return 0;
-
- return -EADDRNOTAVAIL;
-}
+#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
+ "already assigned to %s"
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apid)
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm,
+ unsigned long *aqm)
{
- int ret;
- unsigned long apqi;
- unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(&apid, NULL);
-
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
+ unsigned long apid, apqi;
+ const struct device *dev = mdev_dev(matrix_mdev->mdev);
+ const char *mdev_name = dev_name(dev);
- return 0;
+ for_each_set_bit_inv(apid, apm, AP_DEVICES)
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
+ dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}
/**
- * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured
+ * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
- * @matrix_mdev: the mediated matrix device
+ * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
+ * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * Verifies that the APQNs derived from the cross product of the AP adapter IDs
- * and AP queue indexes comprising the AP matrix are not configured for another
+ * Verifies that each APQN derived from the Cartesian product of a bitmap of
+ * AP adapter IDs and AP queue indexes is not configured for any matrix
* mediated device. AP queue sharing is not allowed.
*
- * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+ unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *lstdev;
+ struct ap_matrix_mdev *matrix_mdev;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
- if (matrix_mdev == lstdev)
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ /*
+ * If the input apm and aqm are fields of the matrix_mdev
+ * object, then move on to the next matrix_mdev.
+ */
+ if (mdev_apm == matrix_mdev->matrix.apm &&
+ mdev_aqm == matrix_mdev->matrix.aqm)
continue;
memset(apm, 0, sizeof(apm));
@@ -698,14 +878,16 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, matrix_mdev->matrix.apm,
- lstdev->matrix.apm, AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
+ AP_DEVICES))
continue;
- if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
- lstdev->matrix.aqm, AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
+ AP_DOMAINS))
continue;
+ vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+
return -EADDRINUSE;
}
@@ -713,6 +895,41 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
}
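
Because an mdev's APQN set is the Cartesian product of its APM and AQM, two mdevs share an APQN exactly when both their adapter masks and their domain masks intersect, which is what the pair of bitmap_and() calls above establishes. The equivalent predicate as a sketch (hypothetical helper name):

	/* Sketch: two matrices share an APQN iff both masks overlap. */
	static bool apqns_shared(unsigned long *apm1, unsigned long *aqm1,
				 unsigned long *apm2, unsigned long *aqm2)
	{
		return bitmap_intersects(apm1, apm2, AP_DEVICES) &&
		       bitmap_intersects(aqm1, aqm2, AP_DOMAINS);
	}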
/**
+ * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
+ * not reserved for the default zcrypt driver and
+ * are not assigned to another mdev.
+ *
+ * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
+ *
+ * Return: One of the following values:
+ * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
+ * most likely -EBUSY indicating the ap_perms_mutex lock is already held.
+ * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
+ *	 zcrypt default driver.
+ * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
+ * o A zero indicating validation succeeded.
+ */
+static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm))
+ return -EADDRNOTAVAIL;
+
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm);
+}
+
+static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ unsigned long apqi;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
+}
+
+/**
* assign_adapter_store - parses the APID from @buf and sets the
* corresponding bit in the mediated matrix device's APM
*
@@ -741,6 +958,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * A lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_adapter_store(struct device *dev,
struct device_attribute *attr,
@@ -748,15 +969,11 @@ static ssize_t assign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
+ DECLARE_BITMAP(apm_delta, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -767,33 +984,97 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
- /*
- * Set the bit in the AP mask (APM) corresponding to the AP adapter
- * number (APID). The bits in the mask, from most significant to least
- * significant bit, correspond to APIDs 0-255.
- */
- ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
- if (ret)
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
goto done;
+ }
- set_bit_inv(apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+ memset(apm_delta, 0, sizeof(apm_delta));
+ set_bit_inv(apid, apm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(apm_delta,
+ matrix_mdev->matrix.aqm, matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apid, matrix_mdev->matrix.apm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
+static struct vfio_ap_queue
+*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid, unsigned long apqi)
+{
+ struct vfio_ap_queue *q = NULL;
+
+ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+ /* If the queue is assigned to the matrix mdev, unlink it. */
+ if (q)
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ return q;
+}
+
+/**
+ * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
+ * adapter from the matrix mdev to which the
+ * adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+ * @qtable: table for storing queues associated with unassigned adapter.
+ */
+static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int loop_cursor;
+ struct vfio_ap_queue *q;
+ struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+ hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
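
The unassignment path above is a collect-then-act sequence: queues belonging to the unplugged adapter are first gathered into a private table (only those the guest could actually see), the shadow APCB is updated so the guest loses access, and only then are the gathered queues reset and unlinked. A compressed userspace sketch of that ordering, with an array standing in for the kernel hashtable and printf standing in for the hardware operations:

#include <stdio.h>

struct queue { int apqn; int in_guest; };

/* Model: collect affected queues, update the guest view, then reset. */
static void hot_unplug(struct queue *qs, int nq, int apid)
{
	struct queue *table[16];
	int i, n = 0;

	/* 1. remember the queues of this adapter that the guest can see */
	for (i = 0; i < nq; i++)
		if ((qs[i].apqn >> 8) == apid && qs[i].in_guest)
			table[n++] = &qs[i];

	/* 2. make the change visible to the guest before touching hardware */
	printf("update guest APCB: drop adapter %02x\n", apid);

	/* 3. reset and unlink the queues the guest can no longer reach */
	for (i = 0; i < n; i++) {
		printf("reset APQN %04x\n", table[i]->apqn);
		table[i]->in_guest = 0;
	}
}

int main(void)
{
	struct queue qs[] = { { 0x0a04, 1 }, { 0x0b04, 1 } };

	hot_unplug(qs, 2, 0x0a); /* resets 0a04 only */
	return 0;
}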
+
/**
* unassign_adapter_store - parses the APID from @buf and clears the
* corresponding bit in the mediated matrix device's APM
@@ -817,13 +1098,7 @@ static ssize_t unassign_adapter_store(struct device *dev,
unsigned long apid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -835,31 +1110,22 @@ static ssize_t unassign_adapter_store(struct device *dev,
}
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apqi)
+static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
{
- int ret;
unsigned long apid;
- unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(NULL, &apqi);
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
-
- return 0;
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
}
/**
@@ -891,6 +1157,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * The lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_domain_store(struct device *dev,
struct device_attribute *attr,
@@ -898,47 +1168,89 @@ static ssize_t assign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
+ DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If the KVM guest is running, disallow assignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
goto done;
- if (apqi > max_apqi) {
+
+ if (apqi > matrix_mdev->matrix.aqm_max) {
ret = -ENODEV;
goto done;
}
- ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
- if (ret)
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
goto done;
+ }
- set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+ memset(aqm_delta, 0, sizeof(aqm_delta));
+ set_bit_inv(apqi, aqm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_domain);
+static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int loop_cursor;
+ struct vfio_ap_queue *q;
+ struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+ hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
/**
* unassign_domain_store - parses the APQI from @buf and clears the
@@ -963,13 +1275,7 @@ static ssize_t unassign_domain_store(struct device *dev,
unsigned long apqi;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
@@ -981,10 +1287,11 @@ static ssize_t unassign_domain_store(struct device *dev,
}
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
@@ -1011,13 +1318,7 @@ static ssize_t assign_control_domain_store(struct device *dev,
unsigned long id;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &id);
if (ret)
@@ -1034,9 +1335,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
* number of control domains that can be assigned.
*/
set_bit_inv(id, matrix_mdev->matrix.adm);
+ if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
@@ -1062,28 +1366,28 @@ static ssize_t unassign_control_domain_store(struct device *dev,
int ret;
unsigned long domid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_domid = matrix_mdev->matrix.adm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If a KVM guest is running, disallow unassignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &domid);
if (ret)
goto done;
- if (domid > max_domid) {
+
+ if (domid > matrix_mdev->matrix.adm_max) {
ret = -ENODEV;
goto done;
}
clear_bit_inv(domid, matrix_mdev->matrix.adm);
+
+ if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
+ clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);
@@ -1099,40 +1403,36 @@ static ssize_t control_domains_show(struct device *dev,
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
n = sprintf(bufpos, "%04lx\n", id);
bufpos += n;
nchars += n;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(control_domains);
-static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
unsigned long apqi1;
- unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
- unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ unsigned long napm_bits = matrix->apm_max + 1;
+ unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
int n;
- apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
- apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
-
- mutex_lock(&matrix_dev->lock);
+ apid1 = find_first_bit_inv(matrix->apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm,
naqm_bits) {
n = sprintf(bufpos, "%02lx.%04lx\n", apid,
apqi);
@@ -1141,25 +1441,50 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
}
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
n = sprintf(bufpos, "%02lx.\n", apid);
bufpos += n;
nchars += n;
}
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
n = sprintf(bufpos, ".%04lx\n", apqi);
bufpos += n;
nchars += n;
}
}
- mutex_unlock(&matrix_dev->lock);
+ return nchars;
+}
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(matrix);
+static ssize_t guest_matrix_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(guest_matrix);
+
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
@@ -1169,6 +1494,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_unassign_control_domain.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
+ &dev_attr_guest_matrix.attr,
NULL,
};
@@ -1201,59 +1527,32 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
if (m != matrix_mdev && m->kvm == kvm) {
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
return -EPERM;
}
}
kvm_get_kvm(kvm);
matrix_mdev->kvm = kvm;
- kvm_arch_crypto_set_masks(kvm,
- matrix_mdev->matrix.apm,
- matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
return 0;
}
-/**
- * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
- *
- * @nb: The notifier block
- * @action: Action to be taken
- * @data: data associated with the request
- *
- * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
- * pinned before). Other requests are ignored.
- *
- * Return: for an UNMAP request, NOFITY_OK; otherwise NOTIFY_DONE.
- */
-static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
+ u64 length)
{
- struct ap_matrix_mdev *matrix_mdev;
-
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
-
- vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1);
- return NOTIFY_OK;
- }
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- return NOTIFY_DONE;
+ vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
}
/**
@@ -1271,36 +1570,36 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
kvm->arch.crypto.pqap_hook = NULL;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
kvm_arch_crypto_clear_masks(kvm);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
}
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
- struct device *dev;
+ struct ap_queue *queue;
struct vfio_ap_queue *q = NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (dev) {
- q = dev_get_drvdata(dev);
- put_device(dev);
- }
+ queue = ap_get_qdev(apqn);
+ if (!queue)
+ return NULL;
+
+ if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
+ q = dev_get_drvdata(&queue->ap_dev.device);
+
+ put_device(&queue->ap_dev.device);
return q;
}
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry)
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry)
{
struct ap_queue_status status;
int ret;
@@ -1308,9 +1607,9 @@ int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
if (!q)
return 0;
-
retry_zapq:
status = ap_zapq(q->apqn);
+ q->reset_rc = status.response_code;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
ret = 0;
@@ -1325,12 +1624,17 @@ retry_zapq:
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
- WARN_ON_ONCE(status.irq_enabled);
+ WARN_ONCE(status.irq_enabled,
+ "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
+ status.response_code);
ret = -EBUSY;
goto free_resources;
default:
/* things are really broken, give up */
- WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ WARN(true,
+ "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
return -EIO;
}
@@ -1342,7 +1646,8 @@ retry_zapq:
msleep(20);
status = ap_tapq(q->apqn, NULL);
}
- WARN_ON_ONCE(retry2 <= 0);
+ WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));
free_resources:
vfio_ap_free_aqic_resources(q);
@@ -1350,27 +1655,20 @@ free_resources:
return ret;
}
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
- int ret;
- int rc = 0;
- unsigned long apid, apqi;
+ int ret, loop_cursor, rc = 0;
struct vfio_ap_queue *q;
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.apm_max + 1) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.aqm_max + 1) {
- q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
- ret = vfio_ap_mdev_reset_queue(q, 1);
- /*
- * Regardless whether a queue turns out to be busy, or
- * is not operational, we need to continue resetting
- * the remaining queues.
- */
- if (ret)
- rc = ret;
- }
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ ret = vfio_ap_mdev_reset_queue(q, 1);
+ /*
+ * Regardless whether a queue turns out to be busy, or
+ * is not operational, we need to continue resetting
+ * the remaining queues.
+ */
+ if (ret)
+ rc = ret;
}
return rc;
@@ -1380,27 +1678,11 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- unsigned long events;
- int ret;
if (!vdev->kvm)
return -EINVAL;
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
- if (ret)
- return ret;
-
- matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events,
- &matrix_mdev->iommu_notifier);
- if (ret)
- goto err_kvm;
- return 0;
-
-err_kvm:
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- return ret;
+ return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}
static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
@@ -1408,8 +1690,6 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &matrix_mdev->iommu_notifier);
vfio_ap_mdev_unset_kvm(matrix_mdev);
}
@@ -1440,27 +1720,84 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return ret;
}
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long apid = AP_QID_CARD(q->apqn);
+ unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+ test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+ return matrix_mdev;
+ }
+
+ return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+ if (matrix_mdev) {
+ if (matrix_mdev->kvm)
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_IN_USE);
+ else
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_ASSIGNED);
+ } else {
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_UNASSIGNED);
+ }
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *vfio_queue_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+ .attrs = vfio_queue_attrs,
+};
+
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
+ .dma_unmap = vfio_ap_mdev_dma_unmap,
};
static struct mdev_driver vfio_ap_matrix_driver = {
@@ -1500,3 +1837,432 @@ void vfio_ap_mdev_unregister(void)
mdev_unregister_device(&matrix_dev->device);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
+
+int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+{
+ int ret;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ if (ret)
+ return ret;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ matrix_mdev = get_update_locks_by_apqn(q->apqn);
+
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+ return 0;
+}
+
+void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+{
+ unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
+
+ if (matrix_mdev) {
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+
+ /*
+ * If the queue is assigned to the guest's APCB, then remove
+	 * the adapter's APID from the APCB and hot plug the updated APCB
+	 * into the guest.
+ */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ }
+
+ vfio_ap_mdev_reset_queue(q, 1);
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_resource_in_use - check whether any of a set of APQNs is
+ * assigned to a mediated device under the control
+ * of the vfio_ap device driver.
+ *
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
+ *
+ * Return:
+ * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
+ * assigned to a mediated device under the control of the vfio_ap
+ * device driver.
+ * * Otherwise, return 0.
+ */
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+{
+ int ret;
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+
+ return ret;
+}
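
The helper takes guests_lock before mdevs_lock, the same order used everywhere else in this patch (see the lock documentation added to vfio_ap_private.h below); a fixed acquisition order is what keeps the two-mutex scheme deadlock-free. A minimal pthread illustration of the discipline — the names mirror the driver, but nothing here is kernel code:

#include <pthread.h>

static pthread_mutex_t guests_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mdevs_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Every path needing both locks takes guests_lock first, mdevs_lock second. */
static void update_guest_and_mdev(void)
{
	pthread_mutex_lock(&guests_lock);
	pthread_mutex_lock(&mdevs_lock);
	/* ... mutate per-guest state, then per-mdev state ... */
	pthread_mutex_unlock(&mdevs_lock);
	pthread_mutex_unlock(&guests_lock);
}

int main(void)
{
	update_guest_and_mdev();
	return 0;
}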
+
+/**
+ * vfio_ap_mdev_hot_unplug_cfg - hot unplug from a guest the adapters, domains
+ *				 and control domains that have been removed
+ *				 from the host's AP configuration.
+ *
+ * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
+ * @aprem: the adapters that have been removed from the host's AP configuration
+ * @aqrem: the domains that have been removed from the host's AP configuration
+ * @cdrem: the control domains that have been removed from the host's AP
+ * configuration.
+ */
+static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *aprem,
+ unsigned long *aqrem,
+ unsigned long *cdrem)
+{
+ int do_hotplug = 0;
+
+ if (!bitmap_empty(aprem, AP_DEVICES)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.apm,
+ aprem, AP_DEVICES);
+ }
+
+ if (!bitmap_empty(aqrem, AP_DOMAINS)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.aqm,
+ aqrem, AP_DOMAINS);
+ }
+
+ if (!bitmap_empty(cdrem, AP_DOMAINS))
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
+ matrix_mdev->shadow_apcb.adm,
+ cdrem, AP_DOMAINS);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}
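
The unplug helper above is pure bitmap arithmetic: each removal set is masked out of the corresponding shadow mask, and the guest's APCB is refreshed only when that actually clears something. A userspace model of the masking, using LSB-first 64-bit words rather than the kernel's inverted bitmaps; it reports whether any bit was cleared, which is the condition the do_hotplug accumulation is after:

#include <stdint.h>
#include <stdio.h>

#define WORDS 4 /* 4 x 64 = 256 bits, the size of an AP mask */

/* dst &= ~mask; report whether any bit was actually cleared. */
static int mask_out(uint64_t *dst, const uint64_t *mask, int words)
{
	int i, changed = 0;

	for (i = 0; i < words; i++) {
		uint64_t next = dst[i] & ~mask[i];

		changed |= (next != dst[i]);
		dst[i] = next;
	}
	return changed;
}

int main(void)
{
	uint64_t shadow_apm[WORDS] = { 0x5 }, aprem[WORDS] = { 0x4 };

	if (mask_out(shadow_apm, aprem, WORDS))
		printf("refresh guest APCB\n"); /* bit 2 was unplugged */
	return 0;
}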
+
+/**
+ * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
+ * domains and control domains that have been removed
+ * from the host AP configuration and unplugs them
+ * from those guests.
+ *
+ * @ap_remove: bitmap specifying which adapters have been removed from the host
+ * config.
+ * @aq_remove: bitmap specifying which domains have been removed from the host
+ * config.
+ * @cd_remove: bitmap specifying which control domains have been removed from
+ * the host config.
+ */
+static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
+ unsigned long *aq_remove,
+ unsigned long *cd_remove)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+ int do_remove = 0;
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ do_remove |= bitmap_and(aprem, ap_remove,
+ matrix_mdev->matrix.apm,
+ AP_DEVICES);
+ do_remove |= bitmap_and(aqrem, aq_remove,
+ matrix_mdev->matrix.aqm,
+ AP_DOMAINS);
+ do_remove |= bitmap_and(cdrem, cd_remove,
+ matrix_mdev->matrix.adm,
+ AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
+ cdrem);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
+ * control domains from the host AP configuration
+ * by unplugging them from the guests that are
+ * using them.
+ * @cur_config_info: the current host AP configuration information
+ * @prev_config_info: the previous host AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ int do_remove;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+
+ do_remove = bitmap_andnot(aprem,
+ (unsigned long *)prev_config_info->apm,
+ (unsigned long *)cur_config_info->apm,
+ AP_DEVICES);
+ do_remove |= bitmap_andnot(aqrem,
+ (unsigned long *)prev_config_info->aqm,
+ (unsigned long *)cur_config_info->aqm,
+ AP_DOMAINS);
+ do_remove |= bitmap_andnot(cdrem,
+ (unsigned long *)prev_config_info->adm,
+ (unsigned long *)cur_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
+}
+
+/**
+ * vfio_ap_filter_apid_by_qtype - filter APIDs from an AP mask for adapters that
+ * are older than AP type 10 (CEX4).
+ * @apm: a bitmap of the APIDs to examine
+ * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
+ */
+static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
+{
+ bool apid_cleared;
+ struct ap_queue_status status;
+ unsigned long apid, apqi, info;
+ int qtype, qtype_mask = 0xff000000;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ apid_cleared = false;
+
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
+ switch (status.response_code) {
+ /*
+ * According to the architecture, the queue's
+ * info is filled in each case below.
+ */
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
+ qtype = (info & qtype_mask) >> 24;
+
+ /*
+ * The vfio_ap device driver only
+ * supports CEX4 and newer adapters, so
+ * remove the APID if the adapter is
+ * older than a CEX4.
+ */
+ if (qtype < AP_DEVICE_TYPE_CEX4) {
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ }
+
+ break;
+
+ default:
+ /*
+ * If we don't know the adapter type,
+ * clear its APID since it can't be
+ * determined whether the vfio_ap
+ * device driver supports it.
+ */
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ break;
+ }
+
+ /*
+ * If we've already cleared the APID from the apm, there
+ * is no need to continue examining the remaining AP
+ * queues to determine the type of the adapter.
+ */
+ if (apid_cleared)
+ break;
+ }
+ }
+}
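
ap_test_queue() returns the queue's info word with the adapter type in the byte selected by qtype_mask (bits 24-31), so a compare against a small constant like AP_DEVICE_TYPE_CEX4 needs the field shifted down first. A self-contained illustration of the extraction; the bit layout and the AP_DEVICE_TYPE_CEX4 value are assumptions taken from the AP bus definitions:

#include <stdio.h>

#define AP_DEVICE_TYPE_CEX4 10 /* assumed value, as in ap_bus.h */

/* Extract the adapter-type byte from bits 24-31 of the TAPQ info word. */
static int tapq_qtype(unsigned long info)
{
	return (info >> 24) & 0xff;
}

int main(void)
{
	unsigned long info = 0x0b001234UL; /* type 11: newer than CEX4 */

	if (tapq_qtype(info) < AP_DEVICE_TYPE_CEX4)
		printf("filter out: older than CEX4\n");
	else
		printf("keep: CEX4 or newer\n");
	return 0;
}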
+
+/**
+ * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
+ * control domains that have been added to the host's
+ * AP configuration for each matrix mdev to which they
+ * are assigned.
+ *
+ * @apm_add: a bitmap specifying the adapters that have been added to the AP
+ * configuration.
+ * @aqm_add: a bitmap specifying the domains that have been added to the AP
+ * configuration.
+ * @adm_add: a bitmap specifying the control domains that have been added to the
+ * AP configuration.
+ */
+static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
+ unsigned long *adm_add)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (list_empty(&matrix_dev->mdev_list))
+ return;
+
+ vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ bitmap_and(matrix_mdev->apm_add,
+ matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
+ bitmap_and(matrix_mdev->aqm_add,
+ matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
+ bitmap_and(matrix_mdev->adm_add,
+ matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
+ * control domains to the host AP configuration
+ * by updating the bitmaps that specify what adapters,
+ * domains and control domains have been added so they
+ * can be hot plugged into the guest when the AP bus
+ * scan completes (see vfio_ap_on_scan_complete
+ * function).
+ * @cur_config_info: the current AP configuration information
+ * @prev_config_info: the previous AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ bool do_add;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
+
+ do_add = bitmap_andnot(apm_add,
+ (unsigned long *)cur_config_info->apm,
+ (unsigned long *)prev_config_info->apm,
+ AP_DEVICES);
+ do_add |= bitmap_andnot(aqm_add,
+ (unsigned long *)cur_config_info->aqm,
+ (unsigned long *)prev_config_info->aqm,
+ AP_DOMAINS);
+ do_add |= bitmap_andnot(adm_add,
+ (unsigned long *)cur_config_info->adm,
+ (unsigned long *)prev_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_add)
+ vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
+}
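
Both notification handlers reduce to set differences between two configuration snapshots: additions are cur & ~prev, removals are prev & ~cur. The arithmetic in miniature, with single 64-bit words standing in for the kernel bitmaps:

#include <stdio.h>

int main(void)
{
	unsigned long prev = 0x0f; /* adapters 0-3 present before the event */
	unsigned long cur  = 0x1e; /* adapters 1-4 present after the event  */

	unsigned long added   = cur & ~prev; /* 0x10: adapter 4 plugged in */
	unsigned long removed = prev & ~cur; /* 0x01: adapter 0 pulled out */

	printf("added=%#lx removed=%#lx\n", added, removed);
	return 0;
}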
+
+/**
+ * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
+ * configuration.
+ *
+ * @cur_cfg_info: the current host AP configuration
+ * @prev_cfg_info: the previous host AP configuration
+ */
+void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+ struct ap_config_info *prev_cfg_info)
+{
+ if (!cur_cfg_info || !prev_cfg_info)
+ return;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
+ vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
+ memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+{
+ bool do_hotplug = false;
+ int filter_domains = 0;
+ int filter_adapters = 0;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+ matrix_mdev->apm_add, AP_DEVICES);
+ filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ matrix_mdev->aqm_add, AP_DOMAINS);
+
+ if (filter_adapters && filter_domains)
+ do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+ else if (filter_adapters)
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev);
+ else
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+ aqm, matrix_mdev);
+
+ if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+ AP_DOMAINS))
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+}
+
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
+ bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
+ bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
+ continue;
+
+ vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
+ bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
+ bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
+ bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
+ }
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index a26efd804d0d..d782cf463eab 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/hashtable.h>
#include "ap_bus.h"
@@ -32,20 +33,26 @@
* @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
- * @lock: mutex for locking the AP matrix device. This lock will be
+ * @mdevs_lock: mutex for locking the AP matrix device. This lock will be
* taken every time we fiddle with state managed by the vfio_ap
* driver, be it using @mdev_list or writing the state of a
* single ap_matrix_mdev device. It's quite coarse but we don't
* expect much contention.
* @vfio_ap_drv: the vfio_ap device driver
+ * @guests_lock: mutex for controlling access to a guest that is using AP
+ * devices passed through by the vfio_ap device driver. This lock
+ * will be taken when the AP devices are plugged into or unplugged
+ * from a guest, and when an ap_matrix_mdev device is added to or
+ * removed from @mdev_list or the list is iterated.
*/
struct ap_matrix_dev {
struct device device;
atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
- struct mutex lock;
+ struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
struct ap_driver *vfio_ap_drv;
+ struct mutex guests_lock; /* serializes access to each KVM guest */
};
extern struct ap_matrix_dev *matrix_dev;
@@ -75,48 +82,77 @@ struct ap_matrix {
};
/**
+ * struct ap_queue_table - a table of queue objects.
+ *
+ * @queues: a hashtable of queues (struct vfio_ap_queue).
+ */
+struct ap_queue_table {
+ DECLARE_HASHTABLE(queues, 8);
+};
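
DECLARE_HASHTABLE(queues, 8) expands to an array of 2^8 hlist heads; entries are keyed by APQN via hash_add() and walked with hash_for_each(). A rough, self-contained userspace model of the same structure — chained buckets indexed by the low bits of the key, where the kernel's hash function is a stronger mix and all names here are illustrative:

#include <stdio.h>

#define NBUCKETS 256 /* 2^8, as in DECLARE_HASHTABLE(queues, 8) */

struct vq {
	int apqn;
	struct vq *next; /* chain within one bucket */
};

struct qtable {
	struct vq *buckets[NBUCKETS];
};

static void qtable_add(struct qtable *t, struct vq *q)
{
	int b = q->apqn & (NBUCKETS - 1); /* stand-in for the kernel hash */

	q->next = t->buckets[b];
	t->buckets[b] = q;
}

static struct vq *qtable_find(struct qtable *t, int apqn)
{
	struct vq *q;

	for (q = t->buckets[apqn & (NBUCKETS - 1)]; q; q = q->next)
		if (q->apqn == apqn)
			return q;
	return NULL;
}

int main(void)
{
	static struct qtable t;
	struct vq q = { .apqn = 0x0a04 };

	qtable_add(&t, &q);
	printf("found: %d\n", qtable_find(&t, 0x0a04) != NULL);
	return 0;
}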
+
+/**
* struct ap_matrix_mdev - Contains the data associated with a matrix mediated
* device.
* @vdev: the vfio device
* @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
- * @iommu_notifier: notifier block used for specifying callback function for
- * handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP even
+ * @shadow_apcb: the shadow copy of the APCB field of the KVM guest's CRYCB
* @kvm: the struct holding guest's state
* @pqap_hook: the function pointer to the interception handler for the
* PQAP(AQIC) instruction.
* @mdev: the mediated device
+ * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
+ * @apm_add: bitmap of APIDs added to the host's AP configuration
+ * @aqm_add: bitmap of APQIs added to the host's AP configuration
+ * @adm_add: bitmap of control domain numbers added to the host's AP
+ * configuration
*/
struct ap_matrix_mdev {
struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
- struct notifier_block iommu_notifier;
+ struct ap_matrix shadow_apcb;
struct kvm *kvm;
crypto_hook pqap_hook;
struct mdev_device *mdev;
+ struct ap_queue_table qtable;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
};
/**
* struct vfio_ap_queue - contains the data associated with a queue bound to the
* vfio_ap device driver
* @matrix_mdev: the matrix mediated device
- * @saved_pfn: the guest PFN pinned for the guest
+ * @saved_iova: the notification indicator byte (nib) address
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+ * @reset_rc: the status response code from the last reset of the queue
*/
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
- unsigned long saved_pfn;
+ dma_addr_t saved_iova;
int apqn;
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
+ unsigned int reset_rc;
};
int vfio_ap_mdev_register(void);
void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry);
+
+int vfio_ap_mdev_probe_queue(struct ap_device *queue);
+void vfio_ap_mdev_remove_queue(struct ap_device *queue);
+
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm);
+
+void vfio_ap_on_cfg_changed(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 35d4b398c197..8bd9fd51208c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -763,6 +763,49 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
ipa_name, com, CARD_DEVID(card));
}
+static void qeth_default_link_info(struct qeth_card *card)
+{
+ struct qeth_link_info *link_info = &card->info.link_info;
+
+ QETH_CARD_TEXT(card, 2, "dftlinfo");
+ link_info->duplex = DUPLEX_FULL;
+
+ if (IS_IQD(card) || IS_VM_NIC(card)) {
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ } else {
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ link_info->speed = SPEED_100;
+ link_info->port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ link_info->speed = SPEED_25000;
+ link_info->port = PORT_FIBRE;
+ break;
+ default:
+ dev_info(&card->gdev->dev,
+ "Unknown link type %x\n",
+ card->info.link_type);
+ link_info->speed = SPEED_UNKNOWN;
+ link_info->port = PORT_OTHER;
+ }
+
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ }
+}
+
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
@@ -790,6 +833,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
+ qeth_default_link_info(card);
}
return NULL;
case IPA_CMD_STARTLAN:
@@ -4744,92 +4788,6 @@ out_free:
return rc;
}
-static int qeth_query_card_info_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
-{
- struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
- struct qeth_link_info *link_info = reply->param;
- struct qeth_query_card_info *card_info;
-
- QETH_CARD_TEXT(card, 2, "qcrdincb");
- if (qeth_setadpparms_inspect_rc(cmd))
- return -EIO;
-
- card_info = &cmd->data.setadapterparms.data.card_info;
- netdev_dbg(card->dev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- card_info->card_type, card_info->port_mode,
- card_info->port_speed);
-
- switch (card_info->port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- link_info->duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- link_info->duplex = DUPLEX_HALF;
- break;
- default:
- link_info->duplex = DUPLEX_UNKNOWN;
- }
-
- switch (card_info->card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_TP;
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_FIBRE;
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- link_info->speed = SPEED_10000;
- link_info->port = PORT_FIBRE;
- break;
- default:
- switch (card_info->port_speed) {
- case CARD_INFO_PORTS_10M:
- link_info->speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- link_info->speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- link_info->speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- link_info->speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- link_info->speed = SPEED_25000;
- break;
- default:
- link_info->speed = SPEED_UNKNOWN;
- }
-
- link_info->port = PORT_OTHER;
- }
-
- return 0;
-}
-
-int qeth_query_card_info(struct qeth_card *card,
- struct qeth_link_info *link_info)
-{
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "qcrdinfo");
- if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
- return -EOPNOTSUPP;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
- if (!iob)
- return -ENOMEM;
-
- return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
-}
-
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_reply *reply_priv,
unsigned long data)
@@ -4839,6 +4797,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_query_oat_physical_if *phys_if;
struct qeth_query_oat_reply *reply;
+ QETH_CARD_TEXT(card, 2, "qoatincb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
@@ -4918,41 +4877,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
static void qeth_init_link_info(struct qeth_card *card)
{
- card->info.link_info.duplex = DUPLEX_FULL;
-
- if (IS_IQD(card) || IS_VM_NIC(card)) {
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
- } else {
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- card->info.link_info.speed = SPEED_100;
- card->info.link_info.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- card->info.link_info.speed = SPEED_1000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- card->info.link_info.speed = SPEED_25000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- default:
- dev_info(&card->gdev->dev, "Unknown link type %x\n",
- card->info.link_type);
- card->info.link_info.speed = SPEED_UNKNOWN;
- card->info.link_info.port = PORT_OTHER;
- }
-
- card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
- }
+ qeth_default_link_info(card);
/* Get more accurate data via QUERY OAT: */
if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
@@ -5461,6 +5386,7 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
qeth_clear_working_pool_list(card);
qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
+ qeth_default_link_info(card);
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index b0b36b2132fe..9eba0a32e9f9 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -428,8 +428,8 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
- struct qeth_link_info link_info;
+ QETH_CARD_TEXT(card, 4, "ethtglks");
cmd->base.speed = card->info.link_info.speed;
cmd->base.duplex = card->info.link_info.duplex;
cmd->base.port = card->info.link_info.port;
@@ -439,16 +439,6 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- /* Check if we can obtain more accurate information. */
- if (!qeth_query_card_info(card, &link_info)) {
- if (link_info.speed != SPEED_UNKNOWN)
- cmd->base.speed = link_info.speed;
- if (link_info.duplex != DUPLEX_UNKNOWN)
- cmd->base.duplex = link_info.duplex;
- if (link_info.port != PORT_OTHER)
- cmd->base.port = link_info.port;
- }
-
qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 511bf8e0a436..b61acbb09be3 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
+ int ret = -EIO;
+
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
- if (zfcp_fsf_open_wka_port(wka_port))
+ if (zfcp_fsf_open_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+ goto out;
+ }
}
- mutex_unlock(&wka_port->mutex);
-
- wait_event(wka_port->completion_wq,
+ wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
- return 0;
+ ret = 0;
+ goto out;
}
- return -EIO;
+out:
+ mutex_unlock(&wka_port->mutex);
+ return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work)
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ goto out;
}
+ wait_event(wka_port->closed,
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
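
Splitting the single completion_wq into opened and closed wait queues means a waiter for one transition can no longer be woken by the completion of the other. A condensed pthread model of the idea, one condition variable per transition — illustrative only, not the zfcp code:

#include <pthread.h>
#include <stdio.h>

enum state { OFFLINE, OPENING, ONLINE, CLOSING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t opened = PTHREAD_COND_INITIALIZER;
static pthread_cond_t closed = PTHREAD_COND_INITIALIZER;
static enum state status = OPENING;

/* The open completion wakes only open waiters; close waiters stay asleep. */
static void open_done(void)
{
	pthread_mutex_lock(&lock);
	status = ONLINE;
	pthread_cond_broadcast(&opened);
	pthread_mutex_unlock(&lock);
}

/* The close completion symmetrically wakes only close waiters. */
static void close_done(void)
{
	pthread_mutex_lock(&lock);
	status = OFFLINE;
	pthread_cond_broadcast(&closed);
	pthread_mutex_unlock(&lock);
}

/* Mirrors wait_event(wka_port->opened, status == ONLINE || OFFLINE). */
static void wait_until_open_settles(void)
{
	pthread_mutex_lock(&lock);
	while (status != ONLINE && status != OFFLINE)
		pthread_cond_wait(&opened, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	open_done();
	wait_until_open_settles();
	printf("open settled: %s\n", status == ONLINE ? "online" : "offline");
	close_done();
	return 0;
}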
@@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
- schedule_delayed_work(&wka_port->work, HZ / 100);
+ queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
+ msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
- init_waitqueue_head(&wka_port->completion_wq);
+ init_waitqueue_head(&wka_port->opened);
+ init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 8aaf409ce9cb..97755407ce1b 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -185,7 +185,8 @@ enum zfcp_fc_wka_status {
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
- * @completion_wq: Wait for completion of open/close command
+ * @opened: Wait for completion of open command
+ * @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@ enum zfcp_fc_wka_status {
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
+ wait_queue_head_t opened;
+ wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4f1e4385ce58..19223b075568 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1907,7 +1907,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->opened);
}
/**
@@ -1966,7 +1966,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->closed);
}
/**
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index aa96f67dd0b1..a10dbe632ef9 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -532,6 +532,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
+
+ vq->num_max = info->num;
+
/* it may have been reduced */
info->num = virtqueue_get_vring_size(vq);
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 90253208a72f..3d9c56ac8224 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1712,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
- unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1723,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
- if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 244fc27215dc..631eda2d467e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -16,6 +16,7 @@
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index bbcb901f6926..9857dba09c95 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ /*
+ * New SCSI devices cannot be attached anymore because of the SCSI host
+ * state so drop the tag set refcnt. Wait until the tag set refcnt drops
+ * to zero because .exit_cmd_priv implementations may need the host
+ * pointer.
+ */
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ wait_for_completion(&shost->tagset_freed);
+
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -236,10 +245,18 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
+ if (dma_dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
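
The new clamp converts the DMA layer's maximum mapping size into a sector budget; with 512-byte sectors SECTOR_SHIFT is 9, so the shift is a divide by 512. A quick check of the arithmetic, where the 256 MiB figure is only an example (e.g. a swiotlb limit):

#include <stdio.h>

#define SECTOR_SHIFT 9 /* 512-byte sectors */

int main(void)
{
	unsigned long long map_size = 256ULL << 20; /* example: 256 MiB */

	/* 256 MiB / 512 = 524288 sectors */
	printf("max_sectors clamp: %llu\n", map_size >> SECTOR_SHIFT);
	return 0;
}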
+
error = scsi_mq_setup_tags(shost);
if (error)
goto fail;
+ kref_init(&shost->tagset_refcnt);
+ init_completion(&shost->tagset_freed);
+
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -312,6 +329,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail:
return error;
}
@@ -345,9 +363,6 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
-
kfree(shost->shost_data);
ida_free(&host_index_ida, shost->host_no);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4a0eadd1c22c..55a1ad6eed03 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -7948,6 +7948,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -8051,7 +8053,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -8479,6 +8481,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 084c0f9fdc3a..938a5e435943 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4272,7 +4272,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4562,7 +4562,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a3e117a4b8e7..f6c37a97544e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -7153,22 +7153,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 5b5885d9732b..09c5fe37754c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3199,7 +3199,6 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
- u8 cmd_type;
u16 pd_index = 0;
u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
@@ -3225,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- switch (cmd_type = megasas_cmd_type(scp)) {
+ switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
@@ -5311,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 322d3ad38159..84b541a57b7b 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -38,7 +38,7 @@
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
@@ -1882,11 +1882,6 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
goto out_release;
}
- /* Old junk for root discovery, that will die ultimately */
-#if !defined(MODULE)
- note_scsi_host(mesh, mesh_host);
-#endif
-
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 565339a0811d..331e896d8225 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= 32)
+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
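The fix above matters because dma_get_required_mask() returns a bit mask (e.g. 0xffffffff for 32-bit addressing), not a bit count, so comparing it against the literal 32 almost never matched. A minimal userspace sketch of the distinction, with the DMA_BIT_MASK macro re-created locally for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	/* Local re-creation of the kernel macro, for illustration only. */
	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	int main(void)
	{
		uint64_t required = DMA_BIT_MASK(32);	/* device needs 32 address bits */

		/* Wrong: compares a mask (~4.29e9) against a bit count. */
		printf("required <= 32              : %d\n", required <= 32);

		/* Right: compare mask against mask. */
		printf("required <= DMA_BIT_MASK(32): %d\n",
		       required <= DMA_BIT_MASK(32));
		return 0;
	}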
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index def37a7e5980..bd6a5f1bd532 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4acaff479916..91d78d0a38fe 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3138,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
- * the sas toplogy has formed, please discover the the whole sas domain,
+ * the sas topology has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3d6b137314f3..bbc4d5890ae6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3686,11 +3686,6 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2b2f68288375..4acff4e84b90 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
- if (!abort_cmd)
+ if (!abort_cmd) {
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EIO;
+ }
mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
if (abort_cmd->qpair) {
@@ -6935,14 +6937,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b59a71aedcd7..96e7e3eaca29 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -111,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -121,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
/**
@@ -651,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
return bytes;
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
-{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
-}
-
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
struct request *req = scsi_cmd_to_rq(cmd);
@@ -676,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}
+/*
+ * When the ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep by
+ * 1 sec to avoid aggressive retries of a target in that state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -772,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_TRANSPORT;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
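The new ACTION_DELAYED_REPREP path requeues the request with a delay instead of retrying immediately. A toy userspace sketch of that backoff idea, assuming illustrative names; the 1000 ms delay is from the patch, and the kernel defers via blk_mq_delay_kick_requeue_list() rather than sleeping:

	#include <stdio.h>
	#include <time.h>

	/* Illustrative stand-ins; not the kernel API. */
	enum state { READY, TRANSITIONING };

	static void delayed_requeue(int delay_ms)
	{
		struct timespec ts = { delay_ms / 1000, (delay_ms % 1000) * 1000000L };

		nanosleep(&ts, NULL);	/* the kernel defers on the requeue list instead */
	}

	int main(void)
	{
		enum state s = TRANSITIONING;
		int attempts = 0;

		while (s == TRANSITIONING && attempts < 3) {
			delayed_requeue(1000);	/* ALUA_TRANSITION_REPREP_DELAY */
			attempts++;
			if (attempts == 2)
				s = READY;	/* pretend the target finished its transition */
		}
		printf("ready after %d delayed requeues\n", attempts);
		return 0;
	}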
@@ -832,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -926,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -942,7 +949,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
@@ -979,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
@@ -1542,7 +1548,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
cmd->eh_eflags = 0;
- cmd->allowed = 0;
cmd->prot_type = 0;
cmd->prot_flags = 0;
cmd->submitter = 0;
@@ -1593,6 +1598,8 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return ret;
}
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1876,10 +1883,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- if (dev->dma_mask) {
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) >> SECTOR_SHIFT);
- }
blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
@@ -1980,9 +1983,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
return blk_mq_alloc_tag_set(tag_set);
}
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
{
+ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+ tagset_refcnt);
+
blk_mq_free_tag_set(&shost->tag_set);
+ complete(&shost->tagset_freed);
}
/**
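scsi_mq_free_tags() above is a kref release callback: it recovers the host from the embedded kref with container_of() and signals a completion. A self-contained userspace sketch of that release pattern (toy non-atomic refcount; types and names are illustrative):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kref { int refcount; };	/* toy, non-atomic stand-in */

	struct host {
		int id;
		struct kref tagset_refcnt;
	};

	static void free_tags(struct kref *kref)
	{
		struct host *h = container_of(kref, struct host, tagset_refcnt);

		printf("freeing tag set of host %d\n", h->id);
	}

	static void kref_put(struct kref *k, void (*release)(struct kref *))
	{
		if (--k->refcount == 0)
			release(k);
	}

	int main(void)
	{
		struct host h = { .id = 7, .tagset_refcnt = { .refcount = 2 } };

		kref_put(&h.tagset_refcnt, free_tags);	/* a device still holds a ref */
		kref_put(&h.tagset_refcnt, free_tags);	/* last ref: release runs */
		return 0;
	}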
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 429663bd78ec..f385b3f04d6e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern void scsi_mq_free_tags(struct kref *kref);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 91ac901a6682..5d27f5196de6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
+ kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index aa70d9282161..5d61f58399dc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1476,6 +1476,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
mutex_unlock(&sdev->state_mutex);
blk_mq_destroy_queue(sdev->request_queue);
+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 12bff64dade6..2f88c61216ee 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -225,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct device *dma_dev = shost->dma_dev;
INIT_LIST_HEAD(&sas_host->rphy_list);
mutex_init(&sas_host->lock);
@@ -236,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
shost->host_no);
+ if (dma_dev->dma_mask) {
+ shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f7a39b96750..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -103,7 +103,6 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
-static void sd_start_done_work(struct work_struct *work);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
@@ -3297,6 +3296,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
(sector_t)BLK_DEF_MAX_SECTORS);
}
+ /*
+ * Limit the default to the SCSI host's optimal sector limit, if set.
+ * Performance may suffer when the size of a request exceeds this
+ * host limit.
+ */
+ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
+
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
@@ -3464,7 +3470,6 @@ static int sd_probe(struct device *dev)
sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
- INIT_WORK(&sdkp->start_done_work, sd_start_done_work);
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
@@ -3587,69 +3592,12 @@ static void scsi_disk_release(struct device *dev)
kfree(sdkp);
}
-/* Process sense data after a START command finished. */
-static void sd_start_done_work(struct work_struct *work)
-{
- struct scsi_disk *sdkp = container_of(work, typeof(*sdkp),
- start_done_work);
- struct scsi_sense_hdr sshdr;
- int res = sdkp->start_result;
-
- if (res == 0)
- return;
-
- sd_print_result(sdkp, "Start/Stop Unit failed", res);
-
- if (res < 0)
- return;
-
- if (scsi_normalize_sense(sdkp->start_sense_buffer,
- sdkp->start_sense_len, &sshdr))
- sd_print_sense_hdr(sdkp, &sshdr);
-}
-
-/* A START command finished. May be called from interrupt context. */
-static void sd_start_done(struct request *req, blk_status_t status)
-{
- const struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
- struct scsi_disk *sdkp = scsi_disk(req->q->disk);
-
- sdkp->start_result = scmd->result;
- WARN_ON_ONCE(scmd->sense_len > SCSI_SENSE_BUFFERSIZE);
- sdkp->start_sense_len = scmd->sense_len;
- memcpy(sdkp->start_sense_buffer, scmd->sense_buffer,
- ARRAY_SIZE(sdkp->start_sense_buffer));
- WARN_ON_ONCE(!schedule_work(&sdkp->start_done_work));
-}
-
-/* Submit a START command asynchronously. */
-static int sd_submit_start(struct scsi_disk *sdkp, u8 cmd[], u8 cmd_len)
-{
- struct scsi_device *sdev = sdkp->device;
- struct request_queue *q = sdev->request_queue;
- struct request *req;
- struct scsi_cmnd *scmd;
-
- req = scsi_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- scmd = blk_mq_rq_to_pdu(req);
- scmd->cmd_len = cmd_len;
- memcpy(scmd->cmnd, cmd, cmd_len);
- scmd->allowed = sdkp->max_retries;
- req->timeout = SD_TIMEOUT;
- req->rq_flags |= RQF_PM | RQF_QUIET;
- req->end_io = sd_start_done;
- blk_execute_rq_nowait(req, /*at_head=*/true);
-
- return 0;
-}
-
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
+ struct scsi_sense_hdr sshdr;
struct scsi_device *sdp = sdkp->device;
+ int res;
if (start)
cmd[4] |= 1; /* START */
@@ -3660,10 +3608,23 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- /* Wait until processing of sense data has finished. */
- flush_work(&sdkp->start_done_work);
+ res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
+ if (res) {
+ sd_print_result(sdkp, "Start/Stop Unit failed", res);
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
+ sd_print_sense_hdr(sdkp, &sshdr);
+ /* 0x3a is medium not present */
+ if (sshdr.asc == 0x3a)
+ res = 0;
+ }
+ }
+
+ /* SCSI error codes must not go to the generic layer */
+ if (res)
+ return -EIO;
- return sd_submit_start(sdkp, cmd, sizeof(cmd));
+ return 0;
}
/*
@@ -3690,8 +3651,6 @@ static void sd_shutdown(struct device *dev)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
-
- flush_work(&sdkp->start_done_work);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index b89187761d61..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -150,11 +150,6 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
-
- int start_result;
- u32 start_sense_len;
- u8 start_sense_buffer[SCSI_SENSE_BUFFERSIZE];
- struct work_struct start_done_work;
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fe000da11332..8ced292c4b96 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index d6b30d521307..775da69b8efa 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;
/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;
@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}
writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}
base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;
pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}
of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ddr_seq_data = of_id->data;
@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ret = brcmstb_init_sram(dn);
of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}
ctrl.pdev = pdev;
ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
out:
kfree(ctrl.s3_params);
-
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);
return ret;
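The probe rework above replaces bare early returns with a goto ladder so that every resource mapped so far is unwound in reverse order of acquisition. The same idiom in a minimal userspace form (the resources here are just mallocs; names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	static int setup(void)
	{
		void *a, *b, *c;
		int ret;

		a = malloc(16);
		if (!a)
			return -1;

		b = malloc(16);
		if (!b) {
			ret = -1;
			goto err_a;
		}

		c = malloc(16);
		if (!c) {
			ret = -1;
			goto err_b;
		}

		free(c);
		free(b);
		free(a);
		return 0;

	err_b:	/* each label undoes the steps that had already succeeded */
		free(b);
	err_a:
		free(a);
		return ret;
	}

	int main(void)
	{
		return setup() ? 1 : 0;
	}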
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 07d52cafbb31..fcec6ed83d5e 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 6383a4edc360..88aee59730e3 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -335,6 +335,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
}
+ reset_control_assert(domain->reset);
+
/* Enable reset clocks for all devices in the domain */
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
@@ -342,7 +344,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}
- reset_control_assert(domain->reset);
+ /* delay for the reset to propagate */
+ udelay(5);
if (domain->bits.pxx) {
/* request the domain to power up */
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index dff7529268e4..972f289d300a 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -243,7 +243,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
- dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 28a8c0dda66c..a0ceeede450f 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO accesses */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/slab.h>
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index 32c346b72635..dff6d5ef4e46 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -107,30 +107,47 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
{
u32 hw_version;
int err;
-
- err = devm_pm_opp_set_clkname(dev, NULL);
- if (err) {
- dev_err(dev, "failed to set OPP clk: %d\n", err);
- return err;
- }
-
- /* Tegra114+ doesn't support OPP yet */
- if (!of_machine_is_compatible("nvidia,tegra20") &&
- !of_machine_is_compatible("nvidia,tegra30"))
- return -ENODEV;
-
- if (of_machine_is_compatible("nvidia,tegra20"))
+ /*
+ * The clk's connection id to set is NULL and this is a NULL terminated
+ * array, hence two NULL entries.
+ */
+ const char *clk_names[] = { NULL, NULL };
+ struct dev_pm_opp_config config = {
+ /*
+ * For some devices we don't have any OPP table in the DT, and
+ * in order to use the same code path for all the devices, we
+ * create a dummy OPP table for them via this. The dummy OPP
+ * table is only capable of doing clk_set_rate() on invocation
+ * of dev_pm_opp_set_rate() and doesn't provide any other
+ * functionality.
+ */
+ .clk_names = clk_names,
+ };
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
hw_version = BIT(tegra_sku_info.soc_process_id);
- else
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ } else if (of_machine_is_compatible("nvidia,tegra30")) {
hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ }
- err = devm_pm_opp_set_supported_hw(dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(dev, &config);
if (err) {
- dev_err(dev, "failed to set OPP supported HW: %d\n", err);
+ dev_err(dev, "failed to set OPP config: %d\n", err);
return err;
}
/*
+ * Tegra114+ doesn't support OPP yet, return early for non tegra20/30
+ * case.
+ */
+ if (!config.supported_hw)
+ return -ENODEV;
+
+ /*
* Older device-trees have an empty OPP table, we will get
* -ENODEV from devm_pm_opp_of_add_table() in this case.
*/
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 5611d14d3ba2..6a4b8f7e7948 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1384,7 +1384,7 @@ tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct generic_pm_domain *genpd;
- const char *rname = "core";
+ const char *rname[] = { "core", NULL};
int err;
genpd = devm_kzalloc(pmc->dev, sizeof(*genpd), GFP_KERNEL);
@@ -1395,7 +1395,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
- err = devm_pm_opp_set_regulators(pmc->dev, &rname, 1);
+ err = devm_pm_opp_set_regulators(pmc->dev, rname);
if (err)
return dev_err_probe(pmc->dev, err,
"failed to set core OPP regulator\n");
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 95ce292994cc..89d1d0d021fc 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1004,9 +1004,18 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_cdns_dma_data *dma;
int ret = 0;
+ /*
+ * The .trigger callback is used to send required IPC to audio
+ * firmware. The .free_stream callback will still be called
+ * by intel_free_stream() in the TRIGGER_SUSPEND case.
+ */
+ if (res->ops && res->ops->trigger)
+ res->ops->trigger(dai, cmd, substream->stream);
+
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma) {
dev_err(dai->dev, "failed to get dma data in %s\n",
@@ -1114,9 +1123,10 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
};
static const struct snd_soc_component_driver dai_component = {
- .name = "soundwire",
- .probe = intel_component_probe,
- .suspend = intel_component_dais_suspend
+ .name = "soundwire",
+ .probe = intel_component_probe,
+ .suspend = intel_component_dais_suspend,
+ .legacy_dai_naming = 1,
};
static int intel_create_dai(struct sdw_cdns *cdns,
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 9df970eeca45..3a992a6478c3 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -169,7 +169,7 @@ struct qcom_swrm_ctrl {
u8 wcmd_id;
struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
- enum sdw_slave_status status[SDW_MAX_DEVICES];
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
u32 slave_status;
@@ -420,7 +420,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
- for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
+ for (dev_num = 0; dev_num <= SDW_MAX_DEVICES; dev_num++) {
status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
@@ -440,7 +440,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
ctrl->slave_status = val;
- for (i = 0; i < SDW_MAX_DEVICES; i++) {
+ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
u32 s;
s = (val >> (i * 2));
@@ -1356,10 +1356,6 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ctrl->bus.clk_stop_timeout = 300;
- ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr");
- if (IS_ERR(ctrl->audio_cgcr))
- dev_err(dev, "Failed to get audio_cgcr reset required for soundwire-v1.6.0\n");
-
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
goto err_clk;
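The qcom changes above size the status array as SDW_MAX_DEVICES + 1 and run the loops through <=: the slave status registers cover device numbers 0 through SDW_MAX_DEVICES inclusive, so an array indexed by device number needs one extra slot. A generic sketch of sizing for inclusive index ranges (the constant is a stand-in):

	#include <stdio.h>

	#define MAX_DEV_NUM 11	/* stand-in for SDW_MAX_DEVICES */

	int main(void)
	{
		/* Indices 0..MAX_DEV_NUM are all valid, hence the +1. */
		int status[MAX_DEV_NUM + 1] = { 0 };
		int dev;

		for (dev = 0; dev <= MAX_DEV_NUM; dev++)
			status[dev] = dev % 3;	/* fake per-device status */

		printf("slots used: %d\n", MAX_DEV_NUM + 1);
		return 0;
	}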
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 267342dfa738..2dcbe166df63 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
/* sample LSB (from slave) on leading edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
setsck(spi, cpol);
}
return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* sample LSB (from slave) on trailing edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
}
return word;
}
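Both hunks above hoist the receive bit position out of the loop: bits is decremented every iteration, so evaluating bits - 1 at sample time put each received bit at a drifting position instead of the fixed MSB of the in-progress word. A standalone sketch of the corrected LSB-first receive loop (getmiso() is faked):

	#include <stdint.h>
	#include <stdio.h>

	static unsigned fake_miso(unsigned i)
	{
		return (0xA5u >> i) & 1;	/* pretend the slave sends 0xA5, LSB first */
	}

	static uint32_t rx_le(unsigned bits)
	{
		uint8_t rxbit = bits - 1;	/* fixed position, captured before the loop */
		uint32_t word = 0;
		unsigned i;

		for (i = 0; bits; bits--, i++) {
			word >>= 1;
			word |= fake_miso(i) << rxbit;	/* "bits - 1" here would drift */
		}
		return word;
	}

	int main(void)
	{
		printf("0x%02X\n", rx_le(8));	/* expect 0xA5 */
		return 0;
	}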
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 72b1a5a2298c..e12ab5b43f34 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -39,6 +39,7 @@
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+#define CQSPI_SLOW_SRAM BIT(4)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -87,6 +88,7 @@ struct cqspi_st {
bool use_dma_read;
u32 pd_dev_id;
bool wr_completion;
+ bool slow_sram;
};
struct cqspi_driver_platdata {
@@ -333,7 +335,10 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
}
}
- irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else if (!cqspi->slow_sram)
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else
+ irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
@@ -673,7 +678,18 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
- writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ /*
+ * On the SoCFPGA platform, reading the SRAM is slow due to a
+ * hardware limitation and causes a read interrupt storm on the
+ * CPU, so enable only the watermark interrupt here and disable
+ * all read interrupts later; the "bytes to read" loop then runs
+ * with read interrupts disabled for maximum performance.
+ */
+
+ if (!cqspi->slow_sram)
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ else
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
@@ -684,6 +700,13 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
+ /*
+ * Disable all read interrupts until
+ * we are out of "bytes to read"
+ */
+ if (cqspi->slow_sram)
+ writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
if (ret && bytes_to_read == 0) {
@@ -715,8 +738,11 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
}
- if (remaining > 0)
+ if (remaining > 0) {
reinit_completion(&cqspi->transfer_complete);
+ if (cqspi->slow_sram)
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+ }
}
/* Check indirect done status */
@@ -1667,6 +1693,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
cqspi->wr_completion = false;
+ if (ddata->quirks & CQSPI_SLOW_SRAM)
+ cqspi->slow_sram = true;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0"))
@@ -1779,7 +1807,9 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
+ .quirks = CQSPI_DISABLE_DAC_MODE
+ | CQSPI_NO_SUPPORT_WR_COMPLETION
+ | CQSPI_SLOW_SRAM,
};
static const struct cqspi_driver_platdata versal_ospi = {
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa7afc8..e4cb52e1fe26 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@ struct meson_spicc_device {
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
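The new clk_ops above wrap clk_divider_ops with a guard: the divider field only holds a meaningful value while the controller is busy with a message, so each op first checks that state and otherwise refuses. A generic userspace sketch of the guard-then-delegate pattern (names and types are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct dev { bool busy; long rate; };

	/* "Real" op, standing in for clk_divider_ops.recalc_rate. */
	static long base_recalc(struct dev *d) { return d->rate; }

	/* Guarded wrapper: only delegate while the hardware state is valid. */
	static long guarded_recalc(struct dev *d)
	{
		if (!d->busy)
			return 0;	/* unknown HW state: report nothing */
		return base_recalc(d);
	}

	int main(void)
	{
		struct dev d = { .busy = false, .rate = 1000000 };

		printf("idle: %ld\n", guarded_recalc(&d));	/* 0 */
		d.busy = true;
		printf("busy: %ld\n", guarded_recalc(&d));	/* 1000000 */
		return 0;
	}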
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 3ebdce804b90..bc5e36fd4288 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -437,7 +437,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
- ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index f5d32ec4634e..0709e987bd5a 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -161,6 +161,7 @@ static int spi_mux_probe(struct spi_device *spi)
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
+ ctlr->must_async = true;
ret = devm_spi_register_controller(&spi->dev, ctlr);
if (ret)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8f97a3eacdea..32c01e684af3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -95,7 +95,7 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
-static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
struct spi_statistics __percpu *pcpu_stats;
@@ -162,7 +162,7 @@ static struct device_attribute dev_attr_spi_device_##field = { \
}
#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
ssize_t len; \
@@ -309,7 +309,7 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
@@ -1275,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
@@ -1432,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
spi_set_cs(msg->spi, true, false);
@@ -1727,8 +1727,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
- if (!ret)
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
ctlr->cur_msg = NULL;
ctlr->fallback = false;
@@ -4033,7 +4032,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
* guard against reentrancy from a different context. The io_mutex
* will catch those cases.
*/
- if (READ_ONCE(ctlr->queue_empty)) {
+ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
message->actual_length = 0;
message->status = -EINPROGRESS;
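spi-mux marks itself must_async, and the __spi_sync() hunk above honours it: a controller that proxies messages onto another bus cannot take the queue-empty fast path, because that path runs the transfer directly in the caller's context instead of through the message queue. A tiny sketch of the gate (types illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct controller {
		bool queue_empty;
		bool must_async;	/* set by controllers that cannot use the fast path */
	};

	static const char *submit(struct controller *c)
	{
		/* Fast path only when the queue is idle AND the controller allows it. */
		if (c->queue_empty && !c->must_async)
			return "sync fast path";
		return "queued path";
	}

	int main(void)
	{
		struct controller plain = { .queue_empty = true, .must_async = false };
		struct controller mux   = { .queue_empty = true, .must_async = true };

		printf("plain: %s\n", submit(&plain));
		printf("mux:   %s\n", submit(&mux));
		return 0;
	}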
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index cac9553666e6..aa100b5141e1 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -18,6 +18,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_VERSION(DRIVERVERSION);
+MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
#define CONFIG_BR_EXT_BRNAME "br0"
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index cc2b44f60c46..9147d176da4f 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -28,6 +28,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 2326aae6709e..bb7db96ed821 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 58df0145e8d0..fb91423a4e2e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -934,8 +934,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
- lacl = rcu_dereference_check(se_deve->se_lun_acl,
- lockdep_is_held(&lun->lun_deve_lock));
+ lacl = se_deve->se_lun_acl;
/*
* spc4r37 p.242:
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 086ac9c9343c..b7f16ee8aa0e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -75,7 +75,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
return TCM_WRITE_PROTECTED;
}
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -152,7 +152,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -216,7 +216,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- lun = rcu_dereference(deve->se_lun);
+ lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
@@ -243,11 +243,8 @@ void core_free_device_list_for_node(
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
- hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
- core_disable_device_list_for_node(lun, deve, nacl, tpg);
- }
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
}
@@ -334,8 +331,7 @@ int core_enable_device_list_for_node(
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
- struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
+ struct se_lun *orig_lun = orig->se_lun;
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
@@ -355,8 +351,8 @@ int core_enable_device_list_for_node(
return -EINVAL;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -374,8 +370,8 @@ int core_enable_device_list_for_node(
return 0;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -434,9 +430,6 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- rcu_assign_pointer(orig->se_lun, NULL);
- rcu_assign_pointer(orig->se_lun_acl, NULL);
-
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
@@ -457,10 +450,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
-
- if (lun != tmp_lun)
+ if (lun != deve->se_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3829b61b56c1..a1d67554709f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -739,8 +739,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (!deve_tmp->se_lun_acl)
continue;
- lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
- lockdep_is_held(&lun_tmp->lun_deve_lock));
+ lacl_tmp = deve_tmp->se_lun_acl;
nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
@@ -784,8 +783,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
- dest_lun = rcu_dereference_check(deve_tmp->se_lun,
- kref_read(&deve_tmp->pr_kref) != 0);
+ dest_lun = deve_tmp->se_lun;
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, dest_lun, deve_tmp,
@@ -1437,34 +1435,26 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl)
+ if (!se_deve->se_lun_acl)
return 0;
- return target_depend_item(&lun_acl->se_lun_group.cg_item);
+ return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl) {
+ if (!se_deve->se_lun_acl) {
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
- target_undepend_item(&lun_acl->se_lun_group.cg_item);
+ target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
@@ -1751,8 +1741,7 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ dest_lun = dest_se_deve->se_lun;
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_lun, dest_se_deve,
@@ -3446,8 +3435,7 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ struct se_lun *dest_lun = dest_se_deve->se_lun;
spin_unlock(&dev->dev_reservation_lock);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 62d15bcc3d93..f85ee5b0fd80 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -877,7 +877,6 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -886,9 +885,9 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
@@ -1217,7 +1216,6 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -1226,9 +1224,9 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6bb20aa9c5bc..8713cda0c2fb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
struct se_device *this_dev;
int rc;
- this_lun = rcu_dereference(deve->se_lun);
+ this_lun = deve->se_lun;
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f2b1bcefcadd..27295bda3e0b 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
@@ -326,6 +327,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
void *ret;
int id;
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
mutex_lock(&teedev->mutex);
id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
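tee_shm_register_user_buf() now rejects bad user ranges up front with access_ok(). A userspace analogue of the overflow-aware range check that call performs (the address ceiling is illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define USER_LIMIT 0x0000800000000000ULL	/* illustrative address ceiling */

	static bool range_ok(uint64_t addr, uint64_t len)
	{
		/* Reject wraparound first, then anything past the limit. */
		if (addr + len < addr)
			return false;
		return addr + len <= USER_LIMIT;
	}

	int main(void)
	{
		printf("%d\n", range_ok(0x1000, 0x2000));	/* 1: fine */
		printf("%d\n", range_ok(UINT64_MAX - 4, 16));	/* 0: wraps */
		printf("%d\n", range_ok(USER_LIMIT - 8, 64));	/* 0: past limit */
		return 0;
	}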
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e5cc948373c..e052dae614eb 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -221,7 +221,7 @@ config THERMAL_EMULATION
config THERMAL_MMIO
tristate "Generic Thermal MMIO driver"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
help
This option enables the generic thermal MMIO driver that will use
@@ -496,7 +496,7 @@ config SPRD_THERMAL
config KHADAS_MCU_FAN_THERMAL
tristate "Khadas MCU controller FAN cooling support"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on MFD_KHADAS_MCU
select MFD_CORE
select REGMAP
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 80d4e0676083..365489bf4b8c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
- if (!priv->data_vault)
+ if (ZERO_OR_NULL_PTR(priv->data_vault))
goto out_free;
bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
goto free_imok;
}
- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
if (result)
@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
if (priv->data_vault) {
- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
free_uuid:
@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
- if (priv->data_vault)
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
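The int3400 fixes switch the checks to ZERO_OR_NULL_PTR() because kmemdup() of a zero-length buffer returns ZERO_SIZE_PTR, a small non-NULL sentinel, so a plain NULL test let an unusable pointer through. A userspace sketch of the sentinel idea, with toy re-creations of the kernel macros (the kernel defines ZERO_SIZE_PTR as (void *)16):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define ZERO_SIZE_PTR ((void *)16)
	#define ZERO_OR_NULL_PTR(p) \
		((unsigned long)(p) <= (unsigned long)ZERO_SIZE_PTR)

	/* Toy kmemdup: returns the sentinel, not NULL, for zero-length copies. */
	static void *kmemdup(const void *src, size_t len)
	{
		void *p;

		if (!len)
			return ZERO_SIZE_PTR;
		p = malloc(len);
		if (p)
			memcpy(p, src, len);
		return p;
	}

	int main(void)
	{
		void *v = kmemdup("", 0);

		printf("v != NULL          : %d\n", v != NULL);		/* 1: NULL check passes */
		printf("ZERO_OR_NULL_PTR(v): %d\n", ZERO_OR_NULL_PTR(v));	/* 1: still unusable */
		return 0;
	}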
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index a9596e7562ea..95adac427b6f 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -81,7 +81,9 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
{}
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6a5d0ae5d7a4..50d50cec7774 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1329,6 +1329,7 @@ free_tz:
kfree(tz);
return ERR_PTR(result);
}
+EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 5018459e8dd9..3a8d6e747c25 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -813,12 +813,13 @@ static const struct attribute_group cooling_device_stats_attr_group = {
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
{
+ const struct attribute_group *stats_attr_group = NULL;
struct cooling_dev_stats *stats;
unsigned long states;
int var;
if (cdev->ops->get_max_state(cdev, &states))
- return;
+ goto out;
states++; /* Total number of states is highest state + 1 */
@@ -828,7 +829,7 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
stats = kzalloc(var, GFP_KERNEL);
if (!stats)
- return;
+ goto out;
stats->time_in_state = (ktime_t *)(stats + 1);
stats->trans_table = (unsigned int *)(stats->time_in_state + states);
@@ -838,9 +839,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
spin_lock_init(&stats->lock);
+ stats_attr_group = &cooling_device_stats_attr_group;
+
+out:
/* Fill the empty slot left in cooling_device_attr_groups */
var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
- cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+ cooling_device_attr_groups[var] = stats_attr_group;
}
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
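cooling_device_attr_groups keeps one empty slot just before its NULL terminator; the rework writes NULL into that slot on any failure path, so the stats group is simply absent instead of pointing at attributes whose backing data was never allocated. A sketch of the reserved-slot idiom with made-up names:

#include <linux/kernel.h>
#include <linux/sysfs.h>

static const struct attribute_group demo_basic_group;	/* always present */

static const struct attribute_group *demo_groups[] = {
	&demo_basic_group,
	NULL,	/* optional slot, filled only when setup succeeds */
	NULL,	/* terminator */
};

static void demo_enable_stats(const struct attribute_group *stats)
{
	/* stats may stay NULL; consumers stop at the first NULL entry */
	demo_groups[ARRAY_SIZE(demo_groups) - 2] = stats;
}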
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index e76a6c173637..f12d0a3ee3e2 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -29,8 +29,7 @@ config USB4_DEBUGFS_WRITE
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
- depends on (USB4=m || KUNIT=y)
- depends on KUNIT
+ depends on USB4 && KUNIT=y
default KUNIT_ALL_TESTS
config USB4_DMA_TEST
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index e5ede5debfb0..0c661a706160 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -407,7 +407,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
static int tb_async_error(const struct ctl_pkg *pkg)
{
- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+ const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
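Since pkg->buffer is a void pointer, the conversion to a typed view needs no explicit cast in C; dropping the cast also stops hiding type mistakes, because a future change to the buffer member would now be caught by the compiler. A small illustrative example with stand-in types:

struct demo_error_pkg {
	unsigned char code;
};

struct demo_pkg {
	void *buffer;	/* generic payload, as in struct ctl_pkg */
};

static unsigned char demo_error_code(const struct demo_pkg *pkg)
{
	const struct demo_error_pkg *error = pkg->buffer;	/* no cast */

	return error->code;
}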
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index ae38f0d25a8d..572b5896caa3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2529,6 +2529,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
tb->cm_ops = &icm_icl_ops;
break;
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index f09da5b62233..01190d9ced16 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 244f8cd38b25..c63c1f4ff9dc 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3786,14 +3786,18 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
*/
int tb_switch_xhci_connect(struct tb_switch *sw)
{
- bool usb_port1, usb_port3, xhci_port1, xhci_port3;
struct tb_port *port1, *port3;
int ret;
+ if (sw->generation != 3)
+ return 0;
+
port1 = &sw->ports[1];
port3 = &sw->ports[3];
if (tb_switch_is_alpine_ridge(sw)) {
+ bool usb_port1, usb_port3, xhci_port1, xhci_port3;
+
usb_port1 = tb_lc_is_usb_plugged(port1);
usb_port3 = tb_lc_is_usb_plugged(port3);
xhci_port1 = tb_lc_is_xhci_connected(port1);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index afb2d373dd47..81e7f64c1739 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -12,7 +12,7 @@
* (non hardware specific) changes to serial.c.
*
* The port is registered with the tty driver as minor device 64, and
- * therefore other ports should should only use 65 upwards.
+ * therefore other ports should only use 65 upwards.
*
* Richard Lucock 28/12/99
*
@@ -51,6 +51,7 @@
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
@@ -283,12 +284,12 @@ static void transmit_chars(struct serial_state *info)
amiga_custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
mb();
- info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit.tail = info->xmit.tail & (UART_XMIT_SIZE - 1);
info->icount.tx++;
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ UART_XMIT_SIZE) < WAKEUP_CHARS)
tty_wakeup(info->tport.tty);
#ifdef SERIAL_DEBUG_INTR
@@ -708,13 +709,13 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) == 0) {
+ UART_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head++] = ch;
- info->xmit.head &= SERIAL_XMIT_SIZE-1;
+ info->xmit.head &= UART_XMIT_SIZE - 1;
local_irq_restore(flags);
return 1;
}
@@ -753,15 +754,14 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE);
+ UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
- info->xmit.head = ((info->xmit.head + c) &
- (SERIAL_XMIT_SIZE-1));
+ info->xmit.head = (info->xmit.head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
@@ -788,14 +788,14 @@ static unsigned int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static unsigned int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
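amiserial's private SERIAL_XMIT_SIZE duplicated UART_XMIT_SIZE from <linux/serial_core.h>; both are a power of two, which is what makes the "& (size - 1)" wrap and the CIRC_*() helpers valid. A hedged sketch of that ring-buffer arithmetic:

#include <linux/circ_buf.h>
#include <linux/serial_core.h>	/* UART_XMIT_SIZE */

static int demo_put_char(struct circ_buf *xmit, unsigned char ch)
{
	/* one slot stays free so head == tail always means "empty" */
	if (CIRC_SPACE(xmit->head, xmit->tail, UART_XMIT_SIZE) == 0)
		return 0;

	xmit->buf[xmit->head] = ch;
	xmit->head = (xmit->head + 1) & (UART_XMIT_SIZE - 1);	/* wrap */
	return 1;
}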
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 31dceb5039b5..e81701a66429 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -916,7 +916,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
/* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
- priv->xmit_size = min(tx_fifo * 4, (unsigned int)SERIAL_XMIT_SIZE);
+ priv->xmit_size = min(tx_fifo * 4, (unsigned int)UART_XMIT_SIZE);
driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
@@ -1222,7 +1222,7 @@ static void kgdbfdc_push_one(void)
/* Construct a word from any data in buffer */
word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
- /* Relocate any remaining data to beginnning of buffer */
+ /* Relocate any remaining data to beginning of buffer */
kgdbfdc_wbuflen -= word.bytes;
for (i = 0; i < kgdbfdc_wbuflen; ++i)
kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fd4d24f61c46..01c112e2e214 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -5,6 +5,14 @@
*
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
+ * Outgoing path:
+ * tty -> DLCI fifo -> scheduler -> GSM MUX data queue ---o-> ldisc
+ * control message -> GSM MUX control queue --´
+ *
+ * Incoming path:
+ * ldisc -> gsm_queue() -o--> tty
+ * `-> gsm_control_response()
+ *
* TO DO:
* Mostly done: ioctls for setting modes/timing
* Partly done: hooks so you can pull off frames to non tty devs
@@ -210,6 +218,9 @@ struct gsm_mux {
/* Events on the GSM channel */
wait_queue_head_t event;
+ /* ldisc send work */
+ struct work_struct tx_work;
+
/* Bits for GSM mode decoding */
/* Framing Layer */
@@ -235,14 +246,17 @@ struct gsm_mux {
struct gsm_dlci *dlci[NUM_DLCI];
int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
+ bool has_devices; /* Devices were registered */
- spinlock_t tx_lock;
+ struct mutex tx_mutex;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
- struct list_head tx_list; /* Pending data packets */
+ struct list_head tx_ctrl_list; /* Pending control packets */
+ struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
+ struct delayed_work kick_timeout; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -369,6 +383,11 @@ static const u8 gsm_fcs8[256] = {
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl);
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
+static void gsmld_write_trigger(struct gsm_mux *gsm);
+static void gsmld_write_task(struct work_struct *work);
/**
* gsm_fcs_add - update FCS
@@ -420,6 +439,27 @@ static int gsm_read_ea(unsigned int *val, u8 c)
}
/**
+ * gsm_read_ea_val - read a value until EA
+ * @val: variable holding value
+ * @data: buffer of data
+ * @dlen: length of data
+ *
+ * Processes an EA value. Updates the passed variable and
+ * returns the processed data length.
+ */
+static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
+{
+ unsigned int len = 0;
+
+ for (; dlen > 0; dlen--) {
+ len++;
+ if (gsm_read_ea(val, *data++))
+ break;
+ }
+ return len;
+}
+
+/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
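gsm_read_ea_val() builds on gsm_read_ea(), which consumes one byte of a 3GPP TS 07.10 extension (EA) field: each byte contributes seven value bits and bit 0 set marks the final byte. An illustrative re-implementation, not the driver's code (the caller is expected to zero *val first, as gsm_is_flow_ctrl_msg() does):

static unsigned int demo_read_ea(unsigned int *val,
				 const unsigned char *data, int dlen)
{
	unsigned int len = 0;

	while (dlen-- > 0) {
		unsigned char c = data[len++];

		*val <<= 7;		/* make room for 7 more bits */
		*val |= c >> 1;		/* bits 7..1 carry the value */
		if (c & 0x01)		/* EA bit set: last byte */
			break;
	}
	return len;	/* number of bytes consumed */
}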
@@ -464,6 +504,68 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
}
/**
+ * gsm_register_devices - register all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
+{
+ struct device *dev;
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return -EINVAL;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't register device 0 - this is the control channel
+ * and not a usable tty interface
+ */
+ dev = tty_register_device(gsm_tty_driver, base + i, NULL);
+ if (IS_ERR(dev)) {
+ if (debug & 8)
+ pr_info("%s failed to register device minor %u",
+ __func__, base + i);
+ for (i--; i >= 1; i--)
+ tty_unregister_device(gsm_tty_driver, base + i);
+ return PTR_ERR(dev);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * gsm_unregister_devices - unregister all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static void gsm_unregister_devices(struct tty_driver *driver,
+ unsigned int index)
+{
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't unregister device 0 - this is the control
+ * channel and not a usable tty interface
+ */
+ tty_unregister_device(gsm_tty_driver, base + i);
+ }
+}
+
+/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
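gsm_register_devices() above follows the usual partial-cleanup idiom: when a later tty_register_device() fails, everything already registered is unwound in reverse before the error is returned. A compact, self-contained sketch with stub helpers:

static int demo_register(unsigned int minor) { return 0; }	/* stub */
static void demo_unregister(unsigned int minor) { }		/* stub */

static int demo_register_range(unsigned int first, unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = demo_register(first + i);
		if (ret) {
			while (i--)	/* undo in reverse order */
				demo_unregister(first + i);
			return ret;
		}
	}
	return 0;
}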
@@ -570,57 +672,72 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
- * Format up and transmit a control frame. These do not go via the
- * queueing logic as they should be transmitted ahead of data when
- * they are needed.
- *
- * FIXME: Lock versus data TX path
+ * Format up and transmit a control frame. These should be transmitted
+ * ahead of data when they are needed.
*/
-
-static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
{
- int len;
- u8 cbuf[10];
- u8 ibuf[3];
+ struct gsm_msg *msg;
+ u8 *dp;
int ocr;
+ msg = gsm_data_alloc(gsm, addr, 0, control);
+ if (!msg)
+ return -ENOMEM;
+
/* toggle C/R coding if not initiator */
ocr = cr ^ (gsm->initiator ? 0 : 1);
- switch (gsm->encoding) {
- case 0:
- cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (ocr << 1) | EA;
- cbuf[2] = control;
- cbuf[3] = EA; /* Length of data = 0 */
- cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
- cbuf[5] = GSM0_SOF;
- len = 6;
- break;
- case 1:
- case 2:
- /* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (ocr << 1) | EA;
- ibuf[1] = control;
- ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
- /* Stuffing may double the size worst case */
- len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
- /* Now add the SOF markers */
- cbuf[0] = GSM1_SOF;
- cbuf[len + 1] = GSM1_SOF;
- /* FIXME: we can omit the lead one in many cases */
- len += 2;
- break;
- default:
- WARN_ON(1);
- return;
- }
- gsmld_output(gsm, cbuf, len);
- if (!gsm->initiator) {
- cr = cr & gsm->initiator;
- control = control & ~PF;
+ msg->data -= 3;
+ dp = msg->data;
+ *dp++ = (addr << 2) | (ocr << 1) | EA;
+ *dp++ = control;
+
+ if (gsm->encoding == 0)
+ *dp++ = EA; /* Length of data = 0 */
+
+ *dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
+ msg->len = (dp - msg->data) + 1;
+
+ gsm_print_packet("Q->", addr, cr, control, NULL, 0);
+
+ mutex_lock(&gsm->tx_mutex);
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ gsm->tx_bytes += msg->len;
+ mutex_unlock(&gsm->tx_mutex);
+ gsmld_write_trigger(gsm);
+
+ return 0;
+}
+
+/**
+ * gsm_dlci_clear_queues - remove outstanding data for a DLCI
+ * @gsm: mux
+ * @dlci: clear for this DLCI
+ *
+ * Clears the data queues for a given DLCI.
+ */
+static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg, *nmsg;
+ int addr = dlci->addr;
+ unsigned long flags;
+
+ /* Clear DLCI write fifo first */
+ spin_lock_irqsave(&dlci->lock, flags);
+ kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
+
+ /* Clear data packets in MUX write queue */
+ mutex_lock(&gsm->tx_mutex);
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ if (msg->addr != addr)
+ continue;
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
}
- gsm_print_packet("-->", addr, cr, control, NULL, 0);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -683,59 +800,151 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
}
/**
- * gsm_data_kick - poke the queue
+ * gsm_send_packet - sends a single packet
* @gsm: GSM Mux
- * @dlci: DLCI sending the data
+ * @msg: packet to send
*
- * The tty device has called us to indicate that room has appeared in
- * the transmit queue. Ram more data into the pipe if we have any
- * If we have been flow-stopped by a CMD_FCOFF, then we can only
- * send messages on DLCI0 until CMD_FCON
+ * The given packet is encoded and sent out. No memory is freed.
+ * The caller must hold the gsm tx lock.
+ */
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
+{
+ int len, ret;
+
+
+ if (gsm->encoding == 0) {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1, msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ } else {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ }
+
+ if (debug & 4)
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
+ gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
+ msg->len);
+
+ ret = gsmld_output(gsm, gsm->txframe, len);
+ if (ret <= 0)
+ return ret;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_bytes -= msg->len;
+
+ return 0;
+}
+
+/**
+ * gsm_is_flow_ctrl_msg - checks if flow control message
+ * @msg: message to check
*
- * FIXME: lock against link layer control transmissions
+ * Returns true if the given message is a flow control command of the
+ * control channel. False is returned in any other case.
*/
+static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
+{
+ unsigned int cmd;
-static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+ if (msg->addr > 0)
+ return false;
+
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ cmd = 0;
+ if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
+ break;
+ switch (cmd & ~PF) {
+ case CMD_FCOFF:
+ case CMD_FCON:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any.
+ * If we have been flow-stopped by a CMD_FCOFF, then we can only
+ * send messages on DLCI0 until CMD_FCON. The caller must hold
+ * the gsm tx lock.
+ */
+static int gsm_data_kick(struct gsm_mux *gsm)
{
struct gsm_msg *msg, *nmsg;
- int len;
+ struct gsm_dlci *dlci;
+ int ret;
- list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
- if (gsm->constipated && msg->addr)
- continue;
- if (gsm->encoding != 0) {
- gsm->txframe[0] = GSM1_SOF;
- len = gsm_stuff_frame(msg->data,
- gsm->txframe + 1, msg->len);
- gsm->txframe[len + 1] = GSM1_SOF;
- len += 2;
- } else {
- gsm->txframe[0] = GSM0_SOF;
- memcpy(gsm->txframe + 1 , msg->data, msg->len);
- gsm->txframe[msg->len + 1] = GSM0_SOF;
- len = msg->len + 2;
- }
+ clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
- if (debug & 4)
- gsm_hex_dump_bytes(__func__, gsm->txframe, len);
- if (gsmld_output(gsm, gsm->txframe, len) <= 0)
+ /* Serialize control messages and control channel messages first */
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) {
+ if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
+ continue;
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
break;
- /* FIXME: Can eliminate one SOF in many more cases */
- gsm->tx_bytes -= msg->len;
-
- list_del(&msg->list);
- kfree(msg);
+ }
+ }
- if (dlci) {
- tty_port_tty_wakeup(&dlci->port);
- } else {
- int i = 0;
+ if (gsm->constipated)
+ return -EAGAIN;
- for (i = 0; i < NUM_DLCI; i++)
- if (gsm->dlci[i])
- tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ /* Serialize other channels */
+ if (list_empty(&gsm->tx_data_list))
+ return 0;
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ dlci = gsm->dlci[msg->addr];
+ /* Send only messages for DLCIs with valid state */
+ if (dlci->state != DLCI_OPEN) {
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ }
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ break;
}
}
+
+ return 1;
}
/**
@@ -784,9 +993,22 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
msg->data = dp;
/* Add to the actual output queue */
- list_add_tail(&msg->list, &gsm->tx_list);
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ if (msg->addr > 0) {
+ list_add_tail(&msg->list, &gsm->tx_data_list);
+ break;
+ }
+ fallthrough;
+ default:
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ break;
+ }
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm, dlci);
+
+ gsmld_write_trigger(gsm);
+ schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
}
/**
@@ -801,10 +1023,9 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
- unsigned long flags;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
__gsm_data_queue(dlci, msg);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/**
@@ -816,48 +1037,55 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
- int len, total_size, size;
- int h = dlci->adaption - 1;
+ int h, len, size;
- total_size = 0;
- while (1) {
- len = kfifo_len(&dlci->fifo);
- if (len == 0)
- return total_size;
-
- /* MTU/MRU count only the data bits */
- if (len > gsm->mtu)
- len = gsm->mtu;
-
- size = len + h;
-
- msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
- if (msg == NULL)
- return -ENOMEM;
- dp = msg->data;
- switch (dlci->adaption) {
- case 1: /* Unstructured */
- break;
- case 2: /* Unstructed with modem bits.
- Always one byte as we never send inline break data */
- *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
- break;
- }
- WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
- __gsm_data_queue(dlci, msg);
- total_size += size;
+ /* for modem bits without break data */
+ h = ((dlci->adaption == 1) ? 0 : 1);
+
+ len = kfifo_len(&dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits but watch adaption mode */
+ if ((len + h) > gsm->mtu)
+ len = gsm->mtu - h;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ if (!msg)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits.
+ * Always one byte as we never send inline break data
+ */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ break;
}
+
+ WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
+ &dlci->lock));
+
+ /* Notify upper layer about available send space. */
+ tty_port_tty_wakeup(&dlci->port);
+
+ __gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
- return total_size;
+ return size;
}
/**
@@ -869,7 +1097,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -885,7 +1113,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
if (dlci->adaption == 4)
overhead = 1;
- /* dlci->skb is locked by tx_lock */
+ /* dlci->skb is locked by tx_mutex */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
@@ -908,9 +1136,6 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
size = len + overhead;
msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
-
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
if (msg == NULL) {
skb_queue_tail(&dlci->skb_list, dlci->skb);
dlci->skb = NULL;
@@ -942,7 +1167,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
* Push an empty frame in to the transmit queue to update the modem status
* bits and to transmit an optional break.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
@@ -1006,32 +1231,43 @@ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
* renegotiate DLCI priorities with optional stuff. Needs optimising.
*/
-static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
{
- int len;
/* Priority ordering: We should do priority with RR of the groups */
- int i = 1;
-
- while (i < NUM_DLCI) {
- struct gsm_dlci *dlci;
+ int i, len, ret = 0;
+ bool sent;
+ struct gsm_dlci *dlci;
- if (gsm->tx_bytes > TX_THRESH_HI)
- break;
- dlci = gsm->dlci[i];
- if (dlci == NULL || dlci->constipated) {
- i++;
- continue;
+ while (gsm->tx_bytes < TX_THRESH_HI) {
+ for (sent = false, i = 1; i < NUM_DLCI; i++) {
+ dlci = gsm->dlci[i];
+ /* skip unused or blocked channel */
+ if (!dlci || dlci->constipated)
+ continue;
+ /* skip channels with invalid state */
+ if (dlci->state != DLCI_OPEN)
+ continue;
+ /* count the sent data per adaption */
+ if (dlci->adaption < 3 && !dlci->net)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ /* on error exit */
+ if (len < 0)
+ return ret;
+ if (len > 0) {
+ ret++;
+ sent = true;
+ /* The lower DLCs can starve the higher DLCs! */
+ break;
+ }
+ /* try next */
}
- if (dlci->adaption < 3 && !dlci->net)
- len = gsm_dlci_data_output(gsm, dlci);
- else
- len = gsm_dlci_data_output_framed(gsm, dlci);
- if (len < 0)
+ if (!sent)
break;
- /* DLCI empty - try the next */
- if (len == 0)
- i++;
- }
+ };
+
+ return ret;
}
/**
@@ -1045,13 +1281,12 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
- unsigned long flags;
int sweep;
if (dlci->constipated)
return;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
@@ -1062,7 +1297,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/*
@@ -1277,7 +1512,6 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
u8 buf[1];
- unsigned long flags;
switch (command) {
case CMD_CLD: {
@@ -1299,9 +1533,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
@@ -1407,7 +1639,7 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- if (gsm->cretries == 0) {
+ if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
@@ -1504,25 +1736,24 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
- unsigned long flags;
-
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
+ /* Prevent us from sending data before the link is up again */
+ dlci->constipated = true;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
- spin_lock_irqsave(&dlci->lock, flags);
- kfifo_reset(&dlci->fifo);
- spin_unlock_irqrestore(&dlci->lock, flags);
+ gsm_dlci_clear_queues(dlci->gsm, dlci);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
- wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
+ gsm_dlci_data_kick(dlci);
+ wake_up(&dlci->gsm->event);
}
/**
@@ -1539,11 +1770,13 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
del_timer(&dlci->t1);
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
+ dlci->constipated = false;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr)
gsm_modem_update(dlci, 0);
+ gsm_dlci_data_kick(dlci);
wake_up(&dlci->gsm->event);
}
@@ -1569,8 +1802,8 @@ static void gsm_dlci_t1(struct timer_list *t)
switch (dlci->state) {
case DLCI_OPENING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
@@ -1585,8 +1818,8 @@ static void gsm_dlci_t1(struct timer_list *t)
break;
case DLCI_CLOSING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
@@ -1620,6 +1853,25 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
}
/**
+ * gsm_dlci_set_opening - change state to opening
+ * @dlci: DLCI to open
+ *
+ * Change internal state to wait for DLCI open from initiator side.
+ * We set off timers and responses upon reception of an SABM.
+ */
+static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
+{
+ switch (dlci->state) {
+ case DLCI_CLOSED:
+ case DLCI_CLOSING:
+ dlci->state = DLCI_OPENING;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* gsm_dlci_begin_close - start channel open procedure
* @dlci: DLCI to open
*
@@ -1728,6 +1980,29 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
}
+/**
+ * gsm_kick_timeout - transmit if possible
+ * @work: work contained in our gsm object
+ *
+ * Transmit data from DLCIs if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+static void gsm_kick_timeout(struct work_struct *work)
+{
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
+ int sent = 0;
+
+ mutex_lock(&gsm->tx_mutex);
+ /* If we have nothing running then we need to fire up */
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ sent = gsm_dlci_data_sweep(gsm);
+ mutex_unlock(&gsm->tx_mutex);
+
+ if (sent && debug & 4)
+ pr_info("%s TX queue stalled\n", __func__);
+}
+
/*
* Allocate/Free DLCI channels
*/
@@ -1762,10 +2037,13 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
dlci->addr = addr;
dlci->adaption = gsm->adaption;
dlci->state = DLCI_CLOSED;
- if (addr)
+ if (addr) {
dlci->data = gsm_dlci_data;
- else
+ /* Prevent us from sending data before the link is up */
+ dlci->constipated = true;
+ } else {
dlci->data = gsm_dlci_command;
+ }
gsm->dlci[addr] = dlci;
return dlci;
}
@@ -1925,7 +2203,7 @@ static void gsm_queue(struct gsm_mux *gsm)
case UIH:
case UIH|PF:
if (dlci == NULL || dlci->state != DLCI_OPEN) {
- gsm_command(gsm, address, DM|PF);
+ gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
@@ -2048,7 +2326,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
} else if ((c & ISO_IEC_646_MASK) == XOFF) {
gsm->constipated = false;
/* Kick the link in case it is idling */
- gsm_data_kick(gsm, NULL);
+ gsmld_write_trigger(gsm);
return;
}
if (c == GSM1_SOF) {
@@ -2176,18 +2454,29 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
+ cancel_delayed_work_sync(&gsm->kick_timeout);
del_timer_sync(&gsm->t2_timer);
+ /* Finish writing to ldisc */
+ flush_work(&gsm->tx_work);
+
/* Free up any link layer users and finally the control channel */
+ if (gsm->has_devices) {
+ gsm_unregister_devices(gsm_tty_driver, gsm->num);
+ gsm->has_devices = false;
+ }
for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
- list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
kfree(txq);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list)
+ kfree(txq);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
}
/**
@@ -2202,20 +2491,22 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
+ int ret;
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- init_waitqueue_head(&gsm->event);
- spin_lock_init(&gsm->control_lock);
- spin_lock_init(&gsm->tx_lock);
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
if (gsm->encoding == 0)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
- dlci = gsm_dlci_alloc(gsm, 0);
- if (dlci == NULL)
- return -ENOMEM;
+ ret = gsm_register_devices(gsm_tty_driver, gsm->num);
+ if (ret)
+ return ret;
+
+ gsm->has_devices = true;
gsm->dead = false; /* Tty opens are now permissible */
return 0;
}
@@ -2236,6 +2527,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
break;
}
}
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2307,8 +2599,15 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
+ mutex_init(&gsm->tx_mutex);
kref_init(&gsm->ref);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
+ INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2333,6 +2632,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_unlock(&gsm_mux_lock);
if (i == MAX_MUX) {
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2465,6 +2765,46 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return gsm->tty->ops->write(gsm->tty, data, len);
}
+
+/**
+ * gsmld_write_trigger - schedule ldisc write task
+ * @gsm: our mux
+ */
+static void gsmld_write_trigger(struct gsm_mux *gsm)
+{
+ if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead)
+ return;
+ schedule_work(&gsm->tx_work);
+}
+
+
+/**
+ * gsmld_write_task - ldisc write task
+ * @work: our tx write work
+ *
+ * Writes out data to the ldisc if possible. We are doing this here to
+ * avoid dead-locking. This returns if no space or data is left for output.
+ */
+static void gsmld_write_task(struct work_struct *work)
+{
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
+ int i, ret;
+
+ /* All outstanding control channel and control messages and one data
+ * frame is sent.
+ */
+ ret = -ENODEV;
+ mutex_lock(&gsm->tx_mutex);
+ if (gsm->tty)
+ ret = gsm_data_kick(gsm);
+ mutex_unlock(&gsm->tx_mutex);
+
+ if (ret >= 0)
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+}
+
/**
* gsmld_attach_gsm - mode set up
* @tty: our tty structure
@@ -2475,39 +2815,14 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
* will need moving to an ioctl path.
*/
-static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base;
- int ret, i;
-
gsm->tty = tty_kref_get(tty);
/* Turn off tty XON/XOFF handling to handle it explicitly. */
gsm->old_c_iflag = tty->termios.c_iflag;
tty->termios.c_iflag &= (IXON | IXOFF);
- ret = gsm_activate_mux(gsm);
- if (ret != 0)
- tty_kref_put(gsm->tty);
- else {
- /* Don't register device 0 - this is the control channel and not
- a usable tty interface */
- base = mux_num_to_base(gsm); /* Base for this MUX */
- for (i = 1; i < NUM_DLCI; i++) {
- struct device *dev;
-
- dev = tty_register_device(gsm_tty_driver,
- base + i, NULL);
- if (IS_ERR(dev)) {
- for (i--; i >= 1; i--)
- tty_unregister_device(gsm_tty_driver,
- base + i);
- return PTR_ERR(dev);
- }
- }
- }
- return ret;
}
-
/**
* gsmld_detach_gsm - stop doing 0710 mux
* @tty: tty attached to the mux
@@ -2518,12 +2833,7 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base = mux_num_to_base(gsm); /* Base for this MUX */
- int i;
-
WARN_ON(tty != gsm->tty);
- for (i = 1; i < NUM_DLCI; i++)
- tty_unregister_device(gsm_tty_driver, base + i);
/* Restore tty XON/XOFF handling. */
gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
@@ -2544,7 +2854,8 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
flags = *fp++;
switch (flags) {
case TTY_NORMAL:
- gsm->receive(gsm, *cp);
+ if (gsm->receive)
+ gsm->receive(gsm, *cp);
break;
case TTY_OVERRUN:
case TTY_BREAK:
@@ -2615,7 +2926,6 @@ static void gsmld_close(struct tty_struct *tty)
static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
- int ret;
if (tty->ops->write == NULL)
return -EINVAL;
@@ -2631,12 +2941,9 @@ static int gsmld_open(struct tty_struct *tty)
/* Attach the initial passive connection */
gsm->encoding = 1;
- ret = gsmld_attach_gsm(tty, gsm);
- if (ret != 0) {
- gsm_cleanup_mux(gsm, false);
- mux_put(gsm);
- }
- return ret;
+ gsmld_attach_gsm(tty, gsm);
+
+ return 0;
}
/**
@@ -2651,16 +2958,9 @@ static int gsmld_open(struct tty_struct *tty)
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
/* Queue poll */
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- if (gsm->tx_bytes < TX_THRESH_LO) {
- gsm_dlci_data_sweep(gsm);
- }
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
}
/**
@@ -2704,11 +3004,23 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
- int space = tty_write_room(tty);
+ struct gsm_mux *gsm = tty->disc_data;
+ int space;
+ int ret;
+
+ if (!gsm)
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+ mutex_lock(&gsm->tx_mutex);
+ space = tty_write_room(tty);
if (space >= nr)
- return tty->ops->write(tty, buf, nr);
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return -ENOBUFS;
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ mutex_unlock(&gsm->tx_mutex);
+
+ return ret;
}
/**
@@ -2733,12 +3045,15 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+
+ if (gsm->dead)
+ mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (gsm->dead)
- mask |= EPOLLHUP;
return mask;
}
@@ -3000,14 +3315,13 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
struct gsm_mux *gsm = dlci->gsm;
- unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
return;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
gsm_dlci_modem_output(gsm, dlci, brk);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -3174,6 +3488,8 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
/* Start sending off SABM messages */
if (gsm->initiator)
gsm_dlci_begin_open(dlci);
+ else
+ gsm_dlci_set_opening(dlci);
/* And wait for virtual carrier */
return tty_port_block_til_ready(port, tty, filp);
}
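Taken together, the n_gsm changes replace the old tx_lock spinlock with tx_mutex and move all ldisc output into a work item, so the write path may sleep and producers only queue a frame and kick the worker. A minimal sketch of that deferral pattern, not the driver's code (assume tx_work is set up with INIT_WORK()):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_mux {
	struct mutex tx_mutex;		/* protects both lists */
	struct list_head tx_ctrl_list;	/* control frames, sent first */
	struct list_head tx_data_list;	/* data frames */
	struct work_struct tx_work;	/* runs demo_write_task() */
};

static void demo_write_task(struct work_struct *work)
{
	struct demo_mux *mux = container_of(work, struct demo_mux, tx_work);

	mutex_lock(&mux->tx_mutex);
	/* ... drain tx_ctrl_list, then tx_data_list; may sleep ... */
	mutex_unlock(&mux->tx_mutex);
}

static void demo_queue_and_kick(struct demo_mux *mux, struct list_head *msg)
{
	mutex_lock(&mux->tx_mutex);
	list_add_tail(msg, &mux->tx_data_list);
	mutex_unlock(&mux->tx_mutex);
	schedule_work(&mux->tx_work);	/* never write from this context */
}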
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 640c9e871044..3afdd9033a9c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -118,6 +118,9 @@ struct n_tty_data {
size_t read_tail;
size_t line_start;
+ /* # of chars looked ahead (to find software flow control chars) */
+ size_t lookahead_count;
+
/* protected by output lock */
unsigned int column;
unsigned int canon_column;
@@ -333,6 +336,8 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->push = 0;
+
+ ldata->lookahead_count = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
@@ -1225,12 +1230,30 @@ static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
return c == START_CHAR(tty) || c == STOP_CHAR(tty);
}
-/* Returns true if c is consumed as flow-control character */
-static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+/**
+ * n_tty_receive_char_flow_ctrl - receive flow control chars
+ * @tty: terminal device
+ * @c: character
+ * @lookahead_done: lookahead has processed this character already
+ *
+ * Receive and process flow control character actions.
+ *
+ * In case lookahead for flow control chars already handled the character in
+ * advance to the normal receive, the actions are skipped during normal
+ * receive.
+ *
+ * Returns true if @c is consumed as flow-control character, the character
+ * must not be treated as normal character.
+ */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (!n_tty_is_char_flow_ctrl(tty, c))
return false;
+ if (lookahead_done)
+ return true;
+
if (c == START_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
@@ -1242,11 +1265,12 @@ static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c
return true;
}
-static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+ if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c, lookahead_done))
return;
if (L_ISIG(tty)) {
@@ -1401,7 +1425,8 @@ static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (I_ISTRIP(tty))
c &= 0x7f;
@@ -1409,12 +1434,10 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
c = tolower(c);
if (I_IXON(tty)) {
- if (c == STOP_CHAR(tty))
- stop_tty(tty);
- else if (c == START_CHAR(tty) ||
- (tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
- c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
- c != SUSP_CHAR(tty))) {
+ if (!n_tty_receive_char_flow_ctrl(tty, c, lookahead_done) &&
+ tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
+ c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
+ c != SUSP_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
}
@@ -1457,6 +1480,27 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
n_tty_receive_char_flagged(tty, c, flag);
}
+/* Caller must ensure count > 0 */
+static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ unsigned char flag = TTY_NORMAL;
+
+ ldata->lookahead_count += count;
+
+ if (!I_IXON(tty))
+ return;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL))
+ n_tty_receive_char_flow_ctrl(tty, *cp, false);
+ cp++;
+ }
+}
+
static void
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count)
@@ -1496,7 +1540,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
static void
n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+ const char *fp, int count, bool lookahead_done)
{
char flag = TTY_NORMAL;
@@ -1504,12 +1548,12 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
- n_tty_receive_char_closing(tty, *cp++);
+ n_tty_receive_char_closing(tty, *cp++, lookahead_done);
}
}
static void n_tty_receive_buf_standard(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count)
+ const unsigned char *cp, const char *fp, int count, bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
char flag = TTY_NORMAL;
@@ -1540,7 +1584,7 @@ static void n_tty_receive_buf_standard(struct tty_struct *tty,
}
if (test_bit(c, ldata->char_map))
- n_tty_receive_char_special(tty, c);
+ n_tty_receive_char_special(tty, c, lookahead_done);
else
n_tty_receive_char(tty, c);
}
@@ -1551,21 +1595,30 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
+ size_t la_count = min_t(size_t, ldata->lookahead_count, count);
if (ldata->real_raw)
n_tty_receive_buf_real_raw(tty, cp, fp, count);
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
- else if (tty->closing && !L_EXTPROC(tty))
- n_tty_receive_buf_closing(tty, cp, fp, count);
- else {
- n_tty_receive_buf_standard(tty, cp, fp, count);
+ else if (tty->closing && !L_EXTPROC(tty)) {
+ if (la_count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ } else {
+ if (la_count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
+ ldata->lookahead_count -= la_count;
+
if (ldata->icanon && !L_EXTPROC(tty))
return;
@@ -2446,6 +2499,7 @@ static struct tty_ldisc_ops n_tty_ops = {
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
.receive_buf2 = n_tty_receive_buf2,
+ .lookahead_buf = n_tty_lookahead_flow_ctrl,
};
/**
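The n_tty side splits each received chunk into the first la_count characters, which the lookahead pass already scanned for START/STOP, and the remainder, so flow-control actions are never applied twice. A sketch of that bookkeeping, assuming the two passes consume consecutive sub-ranges (names are illustrative):

#include <linux/minmax.h>
#include <linux/types.h>

static void demo_process(const unsigned char *cp, size_t n,
			 bool lookahead_done)
{
	/* stand-in for n_tty_receive_buf_standard() */
}

static void demo_receive(size_t *lookahead_count, const unsigned char *cp,
			 size_t count)
{
	size_t la_count = min(*lookahead_count, count);

	if (la_count > 0)
		demo_process(cp, la_count, true);	/* actions already done */
	if (count > la_count)
		demo_process(cp + la_count, count - la_count, false);

	*lookahead_count -= la_count;	/* that much lookahead is consumed */
}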
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 696030cfcb09..287153d32536 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -123,6 +123,26 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
up->port.serial_out(&up->port, offset, value);
}
+/**
+ * serial_lsr_in - Read LSR register and preserve flags across reads
+ * @up: uart 8250 port
+ *
+ * Read LSR register and handle saving non-preserved flags across reads.
+ * The flags that are not preserved across reads are stored into
+ * up->lsr_saved_flags.
+ *
+ * Returns LSR value or'ed with the preserved flags (if any).
+ */
+static inline u16 serial_lsr_in(struct uart_8250_port *up)
+{
+ u16 lsr = up->lsr_saved_flags;
+
+ lsr |= serial_in(up, UART_LSR);
+ up->lsr_saved_flags = lsr & up->lsr_save_mask;
+
+ return lsr;
+}
+
/*
* For the 16C950
*/
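The motivation for serial_lsr_in() is that several LSR bits (break, framing, parity, overrun) are clear-on-read, so a raw serial_in(up, UART_LSR) can silently discard an event a later consumer needed; the helper accumulates those bits into lsr_saved_flags instead. An illustrative caller, assuming the 8250 core's local 8250.h is in scope:

static bool demo_tx_empty(struct uart_8250_port *up)
{
	u16 lsr = serial_lsr_in(up);	/* merges saved clear-on-read bits */

	return lsr & UART_LSR_TEMT;
}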
@@ -183,10 +203,12 @@ void serial8250_rpm_put(struct uart_8250_port *p);
void serial8250_rpm_get_tx(struct uart_8250_port *p);
void serial8250_rpm_put_tx(struct uart_8250_port *p);
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485);
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
void serial8250_em485_start_tx(struct uart_8250_port *p);
void serial8250_em485_stop_tx(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
+extern struct serial_rs485 serial8250_em485_supported;
/* MCR <-> TIOCM conversion */
static inline int serial8250_TIOCM_to_MCR(int tiocm)
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 2a1226a78a0c..15a2387a5b25 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -108,6 +108,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
UPF_SKIP_TEST | UPF_IOREMAP;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = bcm2835aux_rs485_start_tx;
up.rs485_stop_tx = bcm2835aux_rs485_stop_tx;
@@ -166,8 +167,10 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
uartclk = clk_get_rate(data->clk);
if (!uartclk) {
ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ goto dis_clk;
+ }
}
/* the HW-clock divider for bcm2835aux is 8,
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 9b878d023dac..8efdc271eb75 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1139,16 +1139,19 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
-
- serial8250_suspend_port(priv->line);
- clk_disable_unprepare(priv->baud_mux_clk);
+ unsigned long flags;
/*
* This will prevent resume from enabling RTS before the
- * baud rate has been resored.
+ * baud rate has been restored.
*/
+ spin_lock_irqsave(&port->lock, flags);
priv->saved_mctrl = port->mctrl;
- port->mctrl = 0;
+ port->mctrl &= ~TIOCM_RTS;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ serial8250_suspend_port(priv->line);
+ clk_disable_unprepare(priv->baud_mux_clk);
return 0;
}
@@ -1158,6 +1161,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
+ unsigned long flags;
int ret;
ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1180,7 +1184,15 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
start_rx_dma(serial8250_get_port(priv->line));
}
serial8250_resume_port(priv->line);
- port->mctrl = priv->saved_mctrl;
+
+ if (priv->saved_mctrl & TIOCM_RTS) {
+ /* Restore RTS */
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
return 0;
}
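The brcmuart change also illustrates a locking rule: port->mctrl is shared with the serial core, so its read-modify-write must happen under port->lock, and suspend now clears only TIOCM_RTS instead of wiping all modem-control bits. A hedged sketch of that pattern:

#include <linux/serial_core.h>

static unsigned int demo_drop_rts(struct uart_port *port)
{
	unsigned long flags;
	unsigned int saved;

	spin_lock_irqsave(&port->lock, flags);
	saved = port->mctrl;		/* remembered for resume */
	port->mctrl &= ~TIOCM_RTS;	/* drop RTS only, keep DTR etc. */
	spin_unlock_irqrestore(&port->lock, flags);

	return saved;
}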
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 3f56dbc9432b..2e83e7367441 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -277,8 +277,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
* the "Diva" UART used on the management processor on many HP
* ia64 and parisc boxes.
*/
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
(!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
(lsr & UART_LSR_THRE)) {
@@ -1008,9 +1007,11 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->port.throttle = up->port.throttle;
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
+ uart->port.rs485_supported = up->port.rs485_supported;
uart->port.rs485 = up->port.rs485;
uart->rs485_start_tx = up->rs485_start_tx;
uart->rs485_stop_tx = up->rs485_stop_tx;
+ uart->lsr_save_mask = up->lsr_save_mask;
uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
@@ -1098,6 +1099,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
ret = 0;
}
+ if (!uart->lsr_save_mask)
+ uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */
+
/* Initialise interrupt backoff work if required */
if (up->overrun_backoff_time_ms > 0) {
uart->overrun_backoff_time_ms =
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index bb6aca07ab56..a604b42e4458 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -9,26 +9,27 @@
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
+#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/reset.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+
#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
@@ -82,8 +83,21 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
static void dw8250_force_idle(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
+ unsigned int lsr;
serial8250_clear_and_reinit_fifos(up);
+
+ /*
+ * With PSLVERR_RESP_EN parameter set to 1, the device generates an
+ * error response when an attempt to read an empty RBR with FIFO
+ * enabled.
+ */
+ if (up->fcr & UART_FCR_ENABLE_FIFO) {
+ lsr = p->serial_in(p, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return;
+ }
+
(void)p->serial_in(p, UART_RX);
}
@@ -122,12 +136,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value)
/* Returns once the transmitter is empty or we run out of retries */
static void dw8250_tx_wait_empty(struct uart_port *p)
{
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int tries = 20000;
unsigned int delay_threshold = tries - 1000;
unsigned int lsr;
while (tries--) {
lsr = readb (p->membase + (UART_LSR << p->regshift));
+ up->lsr_saved_flags |= lsr & up->lsr_save_mask;
+
if (lsr & UART_LSR_TEMT)
break;
@@ -140,29 +157,23 @@ static void dw8250_tx_wait_empty(struct uart_port *p)
}
}
-static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = to_dw8250_data(p->private_data);
- /* Allow the TX to drain before we reconfigure */
- if (offset == UART_LCR)
- dw8250_tx_wait_empty(p);
-
writeb(value, p->membase + (offset << p->regshift));
if (offset == UART_LCR && !d->uart_16550_compatible)
dw8250_check_lcr(p, value);
}
-
-static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
- writeb(value, p->membase + (offset << p->regshift));
+ /* Allow the TX to drain before we reconfigure */
+ if (offset == UART_LCR)
+ dw8250_tx_wait_empty(p);
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_serial_out(p, offset, value);
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -253,7 +264,7 @@ static int dw8250_handle_irq(struct uart_port *p)
*/
if (!up->dma && rx_timeout) {
spin_lock_irqsave(&p->lock, flags);
- status = p->serial_in(p, UART_LSR);
+ status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
(void) p->serial_in(p, UART_RX);
@@ -263,7 +274,10 @@ static int dw8250_handle_irq(struct uart_port *p)
/* Manually stop the Rx DMA transfer when acting as flow controller */
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
- status = p->serial_in(p, UART_LSR);
+ spin_lock_irqsave(&p->lock, flags);
+ status = serial_lsr_in(up);
+ spin_unlock_irqrestore(&p->lock, flags);
+
if (status & (UART_LSR_DR | UART_LSR_BI)) {
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
dw8250_writel_ext(p, DW_UART_DMASA, 1);
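
The locking added above follows from what serial_lsr_in() does: it is the 8250 core helper that reads LSR and folds the read-to-clear bits into up->lsr_saved_flags, so that work must happen under the port lock or a concurrent reader could lose error bits the hardware has already cleared. A runnable model of the accumulation (simulated hardware, simplified types):

    #include <assert.h>
    #include <stdint.h>

    #define UART_LSR_BI    0x10 /* break interrupt, cleared by reading LSR */
    #define LSR_SAVE_FLAGS 0x1e /* BI | FE | PE | OE */

    struct port_state {
            uint16_t lsr_saved_flags; /* must be updated under the port lock */
            uint16_t lsr_save_mask;
    };

    static uint16_t hw_lsr; /* simulated LSR with read-to-clear error bits */

    static uint16_t read_lsr_hw(void)
    {
            uint16_t v = hw_lsr;

            hw_lsr &= ~LSR_SAVE_FLAGS; /* reading clears the sticky bits */
            return v;
    }

    /* Like serial_lsr_in(): read LSR once and remember the bits the
     * read just cleared so a later consumer still observes them. */
    static uint16_t lsr_in(struct port_state *up)
    {
            uint16_t lsr = read_lsr_hw();

            up->lsr_saved_flags |= lsr & up->lsr_save_mask;
            return lsr;
    }

    int main(void)
    {
            struct port_state up = { .lsr_save_mask = LSR_SAVE_FLAGS };

            hw_lsr = UART_LSR_BI;
            lsr_in(&up);                              /* first reader */
            assert(up.lsr_saved_flags & UART_LSR_BI); /* BI not lost */
            assert(!(read_lsr_hw() & UART_LSR_BI));   /* hw bit is gone */
            return 0;
    }
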
@@ -688,7 +702,6 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -706,9 +719,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -730,11 +741,10 @@ static int dw8250_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops dw8250_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
- SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
+ RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};
static const struct dw8250_platform_data dw8250_dw_apb = {
@@ -792,7 +802,7 @@ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
- .pm = &dw8250_pm_ops,
+ .pm = pm_ptr(&dw8250_pm_ops),
.of_match_table = dw8250_of_match,
.acpi_match_table = dw8250_acpi_match,
},
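
The dev_pm_ops conversion above is the ifdef-free PM idiom: SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() always reference the callbacks, and pm_ptr() collapses to NULL when CONFIG_PM is disabled, so the toolchain discards the unused ops and callbacks without #ifdef CONFIG_PM_SLEEP / CONFIG_PM guards. A condensed sketch of the pattern (the foo_* names are placeholders, not from this driver):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)         { return 0; }
    static int foo_resume(struct device *dev)          { return 0; }
    static int foo_runtime_suspend(struct device *dev) { return 0; }
    static int foo_runtime_resume(struct device *dev)  { return 0; }

    static const struct dev_pm_ops foo_pm_ops = {
            SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
            RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    /* NULL when CONFIG_PM=n; the ops table and callbacks
                     * then become dead code, with no #ifdef needed. */
                    .pm = pm_ptr(&foo_pm_ops),
            },
    };
    module_platform_driver(foo_driver);
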
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index fbabfdd8c7b8..dbe4d44f60d4 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -3,8 +3,10 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/property.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
@@ -16,9 +18,18 @@
#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */
#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */
#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
+#define DW_UART_RAR 0xc4 /* Receive Address Register */
+#define DW_UART_TAR 0xc8 /* Transmit Address Register */
+#define DW_UART_LCR_EXT 0xcc /* Line Extended Control Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
+/* Receive / Transmit Address Register bits */
+#define DW_UART_ADDR_MASK GENMASK(7, 0)
+
+/* Line Status Register bits */
+#define DW_UART_LSR_ADDR_RCVD BIT(8)
+
/* Transceiver Control Register bits */
#define DW_UART_TCR_RS485_EN BIT(0)
#define DW_UART_TCR_RE_POL BIT(1)
@@ -28,22 +39,28 @@
#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+/* Line Extended Control Register bits */
+#define DW_UART_LCR_EXT_DLS_E BIT(0)
+#define DW_UART_LCR_EXT_ADDR_MATCH BIT(1)
+#define DW_UART_LCR_EXT_SEND_ADDR BIT(2)
+#define DW_UART_LCR_EXT_TRANSMIT_MODE BIT(3)
+
/* Component Parameter Register bits */
-#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
-#define DW_UART_CPR_AFCE_MODE (1 << 4)
-#define DW_UART_CPR_THRE_MODE (1 << 5)
-#define DW_UART_CPR_SIR_MODE (1 << 6)
-#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
-#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
-#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
-#define DW_UART_CPR_FIFO_STAT (1 << 10)
-#define DW_UART_CPR_SHADOW (1 << 11)
-#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
-#define DW_UART_CPR_DMA_EXTRA (1 << 13)
-#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+#define DW_UART_CPR_ABP_DATA_WIDTH GENMASK(1, 0)
+#define DW_UART_CPR_AFCE_MODE BIT(4)
+#define DW_UART_CPR_THRE_MODE BIT(5)
+#define DW_UART_CPR_SIR_MODE BIT(6)
+#define DW_UART_CPR_SIR_LP_MODE BIT(7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES BIT(8)
+#define DW_UART_CPR_FIFO_ACCESS BIT(9)
+#define DW_UART_CPR_FIFO_STAT BIT(10)
+#define DW_UART_CPR_SHADOW BIT(11)
+#define DW_UART_CPR_ENCODED_PARMS BIT(12)
+#define DW_UART_CPR_DMA_EXTRA BIT(13)
+#define DW_UART_CPR_FIFO_MODE GENMASK(23, 16)
/* Helper for FIFO size calculation */
-#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+#define DW_UART_CPR_FIFO_SIZE(a) (FIELD_GET(DW_UART_CPR_FIFO_MODE, (a)) * 16)
/*
* divisor = div(I) + div(F)
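
The conversion above is behavior-preserving: DW_UART_CPR_FIFO_MODE is bits 23:16 and FIELD_GET() extracts and right-shifts the field, so DW_UART_CPR_FIFO_SIZE() still multiplies the raw field value by 16. A standalone check with userspace stand-ins for the kernel's GENMASK()/FIELD_GET() helpers (the stand-in definitions are simplified; the real ones live in linux/bits.h and linux/bitfield.h):

    #include <assert.h>
    #include <stdint.h>

    /* userspace stand-ins for the kernel helpers */
    #define GENMASK(h, l)      (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & ~((mask) << 1)))

    #define DW_UART_CPR_FIFO_MODE    GENMASK(23, 16)
    #define DW_UART_CPR_FIFO_SIZE(a) (FIELD_GET(DW_UART_CPR_FIFO_MODE, (a)) * 16)

    int main(void)
    {
            uint32_t cpr = 0x00080000; /* FIFO_MODE field = 8 -> 128-byte FIFO */

            assert(DW_UART_CPR_FIFO_SIZE(cpr) == 128);
            /* matches the old open-coded form exactly */
            assert(DW_UART_CPR_FIFO_SIZE(cpr) == (((cpr >> 16) & 0xff) * 16));
            return 0;
    }
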
@@ -82,10 +99,85 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
p->status |= UPSTAT_AUTOCTS;
serial8250_do_set_termios(p, termios, old);
+
+ /* Filter addresses which have 9th bit set */
+ p->ignore_status_mask |= DW_UART_LSR_ADDR_RCVD;
+ p->read_status_mask |= DW_UART_LSR_ADDR_RCVD;
}
EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
-static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+/*
+ * Wait until RE is de-asserted for sure. An ongoing receive will keep
+ * RE asserted until the end of the frame. Without a BUSY indication
+ * available, the only course of action is to wait for the time it takes
+ * to receive one frame (there might be nothing to receive, but without
+ * BUSY the driver cannot know).
+ */
+static void dw8250_wait_re_deassert(struct uart_port *p)
+{
+ ndelay(p->frame_time);
+}
+
+static void dw8250_update_rar(struct uart_port *p, u32 addr)
+{
+ u32 re_en = dw8250_readl_ext(p, DW_UART_RE_EN);
+
+ /*
+ * RAR shouldn't be changed while receiving. Thus, de-assert RE_EN
+ * if asserted and wait.
+ */
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+ dw8250_wait_re_deassert(p);
+ dw8250_writel_ext(p, DW_UART_RAR, addr);
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, re_en);
+}
+
+static void dw8250_rs485_set_addr(struct uart_port *p, struct serial_rs485 *rs485,
+ struct ktermios *termios)
+{
+ u32 lcr = dw8250_readl_ext(p, DW_UART_LCR_EXT);
+
+ if (rs485->flags & SER_RS485_ADDRB) {
+ lcr |= DW_UART_LCR_EXT_DLS_E;
+ if (termios)
+ termios->c_cflag |= ADDRB;
+
+ if (rs485->flags & SER_RS485_ADDR_RECV) {
+ u32 delta = p->rs485.flags ^ rs485->flags;
+
+ /*
+ * rs485 (param) is equal to uart_port's rs485 only during init
+ * (during init, delta is not yet applicable).
+ */
+ if (unlikely(&p->rs485 == rs485))
+ delta = rs485->flags;
+
+ if ((delta & SER_RS485_ADDR_RECV) ||
+ (p->rs485.addr_recv != rs485->addr_recv))
+ dw8250_update_rar(p, rs485->addr_recv);
+ lcr |= DW_UART_LCR_EXT_ADDR_MATCH;
+ } else {
+ lcr &= ~DW_UART_LCR_EXT_ADDR_MATCH;
+ }
+ if (rs485->flags & SER_RS485_ADDR_DEST) {
+ /*
+ * Don't skip writes here as another endpoint could
+ * have changed communication line's destination
+ * address in between.
+ */
+ dw8250_writel_ext(p, DW_UART_TAR, rs485->addr_dest);
+ lcr |= DW_UART_LCR_EXT_SEND_ADDR;
+ }
+ } else {
+ lcr = 0;
+ }
+ dw8250_writel_ext(p, DW_UART_LCR_EXT, lcr);
+}
+
+static int dw8250_rs485_config(struct uart_port *p, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
u32 tcr;
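
The LCR_EXT plumbing above exposes the DesignWare 9-bit (address/data) mode through the generic serial_rs485 ABI, configured from userspace with the existing TIOCSRS485 ioctl. A minimal usage sketch, assuming headers new enough to carry the SER_RS485_ADDRB family and the addr_recv/addr_dest fields (the device node and address below are examples only):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    int main(void)
    {
            struct serial_rs485 rs485;
            int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY); /* example node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&rs485, 0, sizeof(rs485));
            rs485.flags = SER_RS485_ENABLED | SER_RS485_ADDRB |
                          SER_RS485_ADDR_RECV; /* hw filters on our address */
            rs485.addr_recv = 0x2a;            /* example multipoint address */

            if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                    perror("TIOCSRS485");

            close(fd);
            return 0;
    }
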
@@ -93,25 +185,17 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
tcr &= ~DW_UART_TCR_XFER_MODE;
if (rs485->flags & SER_RS485_ENABLED) {
- /* Clear unsupported flags. */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
- SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
tcr |= DW_UART_TCR_RS485_EN;
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ if (rs485->flags & SER_RS485_RX_DURING_TX)
tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
- } else {
- /* HW does not support same DE level for tx and rx */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
-
+ else
tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
- }
dw8250_writel_ext(p, DW_UART_DE_EN, 1);
dw8250_writel_ext(p, DW_UART_RE_EN, 1);
} else {
- rs485->flags = 0;
+ if (termios)
+ termios->c_cflag &= ~ADDRB;
tcr &= ~DW_UART_TCR_RS485_EN;
}
@@ -127,10 +211,9 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
dw8250_writel_ext(p, DW_UART_TCR, tcr);
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
-
- p->rs485 = *rs485;
+ /* Addressing mode can only be set up after TCR */
+ if (rs485->flags & SER_RS485_ENABLED)
+ dw8250_rs485_set_addr(p, rs485, termios);
return 0;
}
@@ -149,6 +232,12 @@ static bool dw8250_detect_rs485_hw(struct uart_port *p)
return reg;
}
+static const struct serial_rs485 dw8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_ADDRB | SER_RS485_ADDR_RECV |
+ SER_RS485_ADDR_DEST,
+};
+
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
@@ -159,8 +248,11 @@ void dw8250_setup_port(struct uart_port *p)
pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
if (pd->hw_rs485_support) {
p->rs485_config = dw8250_rs485_config;
+ up->lsr_save_mask = LSR_SAVE_FLAGS | DW_UART_LSR_ADDR_RCVD;
+ p->rs485_supported = dw8250_rs485_supported;
} else {
p->rs485_config = serial8250_em485_config;
+ p->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
}
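
dw8250_rs485_supported above is the new contract that recurs throughout this series: instead of each ->rs485_config() callback clamping unsupported flags and zeroing unsupported delays, the driver advertises its capabilities in port->rs485_supported and the serial core sanitizes the user's request before invoking the callback. A runnable model of roughly what that core-side sanitization does (the names and the RS485_ENABLED stand-in are illustrative, not the core's exact code):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define RS485_ENABLED (1u << 0) /* stands in for SER_RS485_ENABLED */

    struct rs485_cfg {
            uint32_t flags;
            uint32_t delay_rts_before_send;
            uint32_t delay_rts_after_send;
    };

    /* Intersect the requested flags with what the port advertises, zero
     * unsupported delays, and wipe everything when RS485 is disabled. */
    static void sanitize_rs485(struct rs485_cfg *req, const struct rs485_cfg *sup)
    {
            if (!(req->flags & RS485_ENABLED)) {
                    memset(req, 0, sizeof(*req));
                    return;
            }
            req->flags &= sup->flags;
            if (!sup->delay_rts_before_send)
                    req->delay_rts_before_send = 0;
            if (!sup->delay_rts_after_send)
                    req->delay_rts_after_send = 0;
    }

    int main(void)
    {
            const struct rs485_cfg sup = { .flags = RS485_ENABLED };
            struct rs485_cfg req = {
                    .flags = RS485_ENABLED | (1u << 4), /* one unsupported flag */
                    .delay_rts_before_send = 50,        /* delays unsupported */
            };

            sanitize_rs485(&req, &sup);
            assert(req.flags == RS485_ENABLED);
            assert(req.delay_rts_before_send == 0);
            return 0;
    }
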
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index e52585064565..f271becfc46c 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -84,8 +84,6 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value)
}
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void serial_putc(struct uart_port *port, unsigned char c)
{
unsigned int status;
@@ -94,7 +92,7 @@ static void serial_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = serial8250_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
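
uart_lsr_tx_empty() replaces the per-driver BOTH_EMPTY macro with a shared test that the holding register (THRE) and the shift register (TEMT) are both drained; the UART_LSR_BOTH_EMPTY name reappears later in this series in 8250_port.c. An equivalent standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define UART_LSR_THRE       0x20 /* transmit-hold register empty */
    #define UART_LSR_TEMT       0x40 /* transmitter (shift register) empty */
    #define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)

    static inline int uart_lsr_tx_empty(uint16_t lsr)
    {
            return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
    }

    int main(void)
    {
            assert(uart_lsr_tx_empty(UART_LSR_THRE | UART_LSR_TEMT));
            assert(!uart_lsr_tx_empty(UART_LSR_THRE)); /* shift reg still busy */
            return 0;
    }
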
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 7292917ac878..314a05e009df 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -112,7 +112,9 @@
struct exar8250;
struct exar8250_platform {
- int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
+ int (*rs485_config)(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
+ const struct serial_rs485 *rs485_supported;
int (*register_gpio)(struct pci_dev *, struct uart_8250_port *);
void (*unregister_gpio)(struct uart_8250_port *);
};
@@ -194,11 +196,11 @@ static int xr17v35x_startup(struct uart_port *port)
static void exar_shutdown(struct uart_port *port)
{
- unsigned char lsr;
bool tx_complete = false;
struct uart_8250_port *up = up_to_u8250p(port);
struct circ_buf *xmit = &port->state->xmit;
int i = 0;
+ u16 lsr;
do {
lsr = serial_in(up, UART_LSR);
@@ -408,7 +410,7 @@ static void xr17v35x_unregister_gpio(struct uart_8250_port *port)
port->port.private_data = NULL;
}
-static int generic_rs485_config(struct uart_port *port,
+static int generic_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -426,18 +428,21 @@ static int generic_rs485_config(struct uart_port *port,
if (is_rs485)
writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 generic_rs485_supported = {
+ .flags = SER_RS485_ENABLED,
+};
+
static const struct exar8250_platform exar8250_default_platform = {
.register_gpio = xr17v35x_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
.rs485_config = generic_rs485_config,
+ .rs485_supported = &generic_rs485_supported,
};
-static int iot2040_rs485_config(struct uart_port *port,
+static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -467,9 +472,13 @@ static int iot2040_rs485_config(struct uart_port *port,
value |= mode;
writeb(value, p + UART_EXAR_MPIOLVL_7_0);
- return generic_rs485_config(port, rs485);
+ return generic_rs485_config(port, termios, rs485);
}
+static const struct serial_rs485 iot2040_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+};
+
static const struct property_entry iot2040_gpio_properties[] = {
PROPERTY_ENTRY_U32("exar,first-pin", 10),
PROPERTY_ENTRY_U32("ngpios", 1),
@@ -498,6 +507,7 @@ static int iot2040_register_gpio(struct pci_dev *pcidev,
static const struct exar8250_platform iot2040_platform = {
.rs485_config = iot2040_rs485_config,
+ .rs485_supported = &iot2040_rs485_supported,
.register_gpio = iot2040_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
};
@@ -540,6 +550,7 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.uartclk = baud * 16;
port->port.rs485_config = platform->rs485_config;
+ port->port.rs485_supported = *(platform->rs485_supported);
/*
* Setup the UART clock for the devices on expansion slot to
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index dba5950b8d0e..65b6b3cbaff6 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -191,7 +191,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
return -ENODEV;
}
-static int fintek_8250_rs485_config(struct uart_port *port,
+static int fintek_8250_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
uint8_t config = 0;
@@ -206,19 +206,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
!(rs485->flags & SER_RS485_RTS_AFTER_SEND))
return -EINVAL;
- memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
- } else {
- memset(rs485, 0, sizeof(*rs485));
- }
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
- /* Only the first port supports delays */
- if (pdata->index) {
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
}
if (rs485->delay_rts_before_send) {
@@ -241,8 +229,6 @@ static int fintek_8250_rs485_config(struct uart_port *port,
sio_write_reg(pdata, RS485, config);
fintek_8250_exit_key(pdata->base_port);
- port->rs485 = *rs485;
-
return 0;
}
@@ -424,6 +410,17 @@ static int probe_setup_port(struct fintek_8250 *pdata,
return -ENODEV;
}
+/* Only the first port supports delays */
+static const struct serial_rs485 fintek_8250_rs485_supported_port0 = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
+static const struct serial_rs485 fintek_8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata = uart->port.private_data;
@@ -435,6 +432,10 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
case CHIP_ID_F81866:
case CHIP_ID_F81865:
uart->port.rs485_config = fintek_8250_rs485_config;
+ if (!pdata->index)
+ uart->port.rs485_supported = fintek_8250_rs485_supported_port0;
+ else
+ uart->port.rs485_supported = fintek_8250_rs485_supported;
break;
default: /* No RS485 Auto direction functional */
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 9c01c531349d..8aad15622a2e 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -25,8 +25,8 @@
int fsl8250_handle_irq(struct uart_port *port)
{
- unsigned char lsr, orig_lsr;
unsigned long flags;
+ u16 lsr, orig_lsr;
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
@@ -77,7 +77,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if ((lsr & UART_LSR_THRE) && (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
- up->lsr_saved_flags = orig_lsr;
+ up->lsr_saved_flags |= orig_lsr & UART_LSR_BI;
uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index cff91aa03f29..2b2f5d8d24b9 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -54,7 +54,7 @@ static void early_out(struct uart_port *port, int offset, uint8_t value)
static void ingenic_early_console_putc(struct uart_port *port, unsigned char c)
{
- uint8_t lsr;
+ u16 lsr;
do {
lsr = early_in(port, UART_LSR);
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 570e25d6f37e..6dc85aaba5d0 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -32,7 +32,7 @@ struct lpc18xx_uart_data {
int line;
};
-static int lpc18xx_rs485_config(struct uart_port *port,
+static int lpc18xx_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -40,24 +40,12 @@ static int lpc18xx_rs485_config(struct uart_port *port,
u32 rs485_dly_reg = 0;
unsigned baud_clk;
- if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN |
LPC18XX_UART_RS485CTRL_DCTRL;
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- } else {
- rs485->flags |= SER_RS485_RTS_AFTER_SEND;
- }
}
if (rs485->delay_rts_after_send) {
@@ -73,14 +61,9 @@ static int lpc18xx_rs485_config(struct uart_port *port,
/ baud_clk;
}
- /* Delay RTS before send not supported */
- rs485->delay_rts_before_send = 0;
-
serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg);
serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg);
- port->rs485 = *rs485;
-
return 0;
}
@@ -98,6 +81,12 @@ static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
writel(value, p->membase + offset);
}
+static const struct serial_rs485 lpc18xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_after_send = 1,
+ /* Delay RTS before send is not supported */
+};
+
static int lpc18xx_serial_probe(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data;
@@ -168,6 +157,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk_uart);
uart.port.private_data = data;
uart.port.rs485_config = lpc18xx_rs485_config;
+ uart.port.rs485_supported = lpc18xx_rs485_supported;
uart.port.serial_out = lpc18xx_uart_serial_out;
uart.dma = &data->dma;
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 0f5af061e0b4..4ba43bef9933 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -330,7 +330,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
- uart.port.iotype = UPIO_MEM;
+ uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.uartclk = lpss->board->base_baud * 16;
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 5a699a1aa79c..1b461fba15a3 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -165,6 +165,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->dev = &ofdev->dev;
port->rs485_config = serial8250_em485_config;
+ port->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ac8bfa042391..0dcecbbc3967 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1115,8 +1115,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
return omap_8250_rx_dma(up);
}
-static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
- u8 iir, unsigned char status)
+static u16 omap_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, u16 status)
{
if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
(iir & UART_IIR_RDI)) {
@@ -1130,7 +1129,7 @@ static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
}
static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
- unsigned char status)
+ u16 status)
{
/*
* Queue a new transfer if FIFO has data.
@@ -1164,7 +1163,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
- unsigned char status;
+ u16 status;
u8 iir;
serial8250_rpm_get(up);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a17619db7939..6f66dc2ebacc 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1553,7 +1553,7 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
#define FINTEK_RTS_INVERT BIT(5)
/* We should do proper H/W transceiver setting before change to RS485 mode */
-static int pci_fintek_rs485_config(struct uart_port *port,
+static int pci_fintek_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct pci_dev *pci_dev = to_pci_dev(port->dev);
@@ -1562,16 +1562,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
- if (!rs485)
- rs485 = &port->rs485;
- else if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- /* F81504/508/512 not support RTS delay before or after send */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable RTS H/W control mode */
setting |= FINTEK_RTS_CONTROL_BY_HW;
@@ -1583,9 +1573,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
/* RTS driving low on TX */
setting |= FINTEK_RTS_INVERT;
}
-
- rs485->delay_rts_after_send = 0;
- rs485->delay_rts_before_send = 0;
} else {
/* Disable RTS H/W control mode */
setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT);
@@ -1593,12 +1580,14 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting);
- if (rs485 != &port->rs485)
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 pci_fintek_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
+ /* F81504/508/512 does not support RTS delay before or after send */
+};
+
static int pci_fintek_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1618,6 +1607,7 @@ static int pci_fintek_setup(struct serial_private *priv,
port->port.iotype = UPIO_PORT;
port->port.iobase = iobase;
port->port.rs485_config = pci_fintek_rs485_config;
+ port->port.rs485_supported = pci_fintek_rs485_supported;
data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL);
if (!data)
@@ -1689,7 +1679,7 @@ static int pci_fintek_init(struct pci_dev *dev)
* pciserial_resume_ports()
*/
port = serial8250_get_port(priv->line[i]);
- pci_fintek_rs485_config(&port->port, NULL);
+ uart_rs485_config(&port->port);
} else {
/* First init without port data
* force init to RS232 Mode
@@ -5077,6 +5067,115 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
+ * Brainboxes PX-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4005,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4019,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4004,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4016,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-203/PX-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4006,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-260/PX-701
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400A,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400C,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-320/324/PX-376/PX-387
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400B,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-335/346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400F,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4010,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-420
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4000,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4011,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-803
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-846
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4008,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4017,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
index 95ff10f25d58..b8d5b7714a9d 100644
--- a/drivers/tty/serial/8250/8250_pericom.c
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -73,7 +73,7 @@ static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
struct uart_8250_port *up = up_to_u8250p(port);
int lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | 0x80);
+ serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
serial_dl_write(up, divisor);
serial_port_out(port, 2, 16 - scr);
serial_port_out(port, UART_LCR, lcr);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 3c36a06a20b0..39b35a61958c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -50,8 +50,6 @@
#define DEBUG_AUTOCONF(fmt...) do { } while (0)
#endif
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Here we define the default xmit fifo size used for each type of UART.
*/
@@ -336,27 +334,29 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
#ifdef CONFIG_SERIAL_8250_RT288X
+#define UART_REG_UNMAPPED -1
+
/* Au1x00/RT288x UART hardware has a weird register layout */
static const s8 au_io_in_map[8] = {
- 0, /* UART_RX */
- 2, /* UART_IER */
- 3, /* UART_IIR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- 7, /* UART_LSR */
- 8, /* UART_MSR */
- -1, /* UART_SCR (unmapped) */
+ [UART_RX] = 0,
+ [UART_IER] = 2,
+ [UART_IIR] = 3,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = 7,
+ [UART_MSR] = 8,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
static const s8 au_io_out_map[8] = {
- 1, /* UART_TX */
- 2, /* UART_IER */
- 4, /* UART_FCR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- -1, /* UART_LSR (unmapped) */
- -1, /* UART_MSR (unmapped) */
- -1, /* UART_SCR (unmapped) */
+ [UART_TX] = 1,
+ [UART_IER] = 2,
+ [UART_FCR] = 4,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = UART_REG_UNMAPPED,
+ [UART_MSR] = UART_REG_UNMAPPED,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
unsigned int au_serial_in(struct uart_port *p, int offset)
@@ -364,7 +364,7 @@ unsigned int au_serial_in(struct uart_port *p, int offset)
if (offset >= ARRAY_SIZE(au_io_in_map))
return UINT_MAX;
offset = au_io_in_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return UINT_MAX;
return __raw_readl(p->membase + (offset << p->regshift));
}
@@ -374,7 +374,7 @@ void au_serial_out(struct uart_port *p, int offset, int value)
if (offset >= ARRAY_SIZE(au_io_out_map))
return;
offset = au_io_out_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return;
__raw_writel(value, p->membase + (offset << p->regshift));
}
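
The designated-initializer rewrite above keeps each Au1x00/RT288x map entry next to the standard register index it translates, and the UART_REG_UNMAPPED sentinel replaces the bare -1 at both definition and lookup. One subtlety the full maps handle: designated initializers zero-fill unnamed slots, so every valid index must be assigned explicitly. A reduced, runnable version of the lookup:

    #include <assert.h>

    #define UART_RX  0
    #define UART_IER 1
    #define UART_SCR 7
    #define UART_REG_UNMAPPED -1

    static const signed char io_map[8] = {
            [UART_RX]  = 0,
            [UART_IER] = 2,
            [UART_SCR] = UART_REG_UNMAPPED, /* unnamed slots default to 0, */
    };                                      /* so map every index explicitly */

    static int translate(int offset)
    {
            if (offset < 0 || offset >= (int)(sizeof(io_map) / sizeof(io_map[0])))
                    return UART_REG_UNMAPPED;
            return io_map[offset];
    }

    int main(void)
    {
            assert(translate(UART_IER) == 2);
            assert(translate(UART_SCR) == UART_REG_UNMAPPED);
            return 0;
    }
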
@@ -647,6 +647,14 @@ void serial8250_em485_destroy(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
+struct serial_rs485 serial8250_em485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_TERMINATE_BUS | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+EXPORT_SYMBOL_GPL(serial8250_em485_supported);
+
/**
* serial8250_em485_config() - generic ->rs485_config() callback
* @port: uart port
@@ -656,7 +664,8 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
* if the uart is incapable of driving RTS as a Transmit Enable signal in
* hardware, relying on software emulation instead.
*/
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -667,29 +676,12 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
}
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- memset(rs485->padding, 0, sizeof(rs485->padding));
- port->rs485 = *rs485;
-
- gpiod_set_value(port->rs485_term_gpio,
- rs485->flags & SER_RS485_TERMINATE_BUS);
-
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
*/
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
+ if (rs485->flags & SER_RS485_ENABLED)
+ return serial8250_em485_init(up);
serial8250_em485_destroy(up);
return 0;
@@ -849,7 +841,7 @@ static int size_fifo(struct uart_8250_port *up)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
- serial_out(up, UART_LCR, 0x03);
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
for (count = 0; count < 256; count++)
serial_out(up, UART_TX, count);
mdelay(20);/* FIXME - schedule_timeout */
@@ -1503,18 +1495,12 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
}
}
-static inline void __do_stop_tx(struct uart_8250_port *p)
-{
- if (serial8250_clear_THRI(p))
- serial8250_rpm_put_tx(p);
-}
-
static inline void __stop_tx(struct uart_8250_port *p)
{
struct uart_8250_em485 *em485 = p->em485;
if (em485) {
- unsigned char lsr = serial_in(p, UART_LSR);
+ u16 lsr = serial_lsr_in(p);
u64 stop_delay = 0;
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
@@ -1522,7 +1508,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (!(lsr & UART_LSR_THRE))
return;
/*
- * To provide required timeing and allow FIFO transfer,
+ * To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
* shift register are empty. The device driver should either
* enable interrupt on TEMT or set UART_CAP_NOTEMT that will
@@ -1544,7 +1530,9 @@ static inline void __stop_tx(struct uart_8250_port *p)
__stop_tx_rs485(p, stop_delay);
}
- __do_stop_tx(p);
+
+ if (serial8250_clear_THRI(p))
+ serial8250_rpm_put_tx(p);
}
static void serial8250_stop_tx(struct uart_port *port)
@@ -1573,10 +1561,8 @@ static inline void __start_tx(struct uart_port *port)
if (serial8250_set_THRI(up)) {
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
+ u16 lsr = serial_lsr_in(up);
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
}
@@ -1616,7 +1602,8 @@ void serial8250_em485_start_tx(struct uart_8250_port *up)
}
EXPORT_SYMBOL_GPL(serial8250_em485_start_tx);
-static inline void start_tx_rs485(struct uart_port *port)
+/* Returns false if start_tx_timer was set up to defer TX start */
+static bool start_tx_rs485(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
@@ -1644,11 +1631,11 @@ static inline void start_tx_rs485(struct uart_port *port)
em485->active_timer = &em485->start_tx_timer;
start_hrtimer_ms(&em485->start_tx_timer,
up->port.rs485.delay_rts_before_send);
- return;
+ return false;
}
}
- __start_tx(port);
+ return true;
}
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
@@ -1678,14 +1665,12 @@ static void serial8250_start_tx(struct uart_port *port)
serial8250_rpm_get_tx(up);
- if (em485 &&
- em485->active_timer == &em485->start_tx_timer)
- return;
-
- if (em485)
- start_tx_rs485(port);
- else
- __start_tx(port);
+ if (em485) {
+ if ((em485->active_timer == &em485->start_tx_timer) ||
+ !start_tx_rs485(port))
+ return;
+ }
+ __start_tx(port);
}
static void serial8250_throttle(struct uart_port *port)
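
start_tx_rs485() is now a predicate: true means start transmitting immediately, false means the start was handed off to the delay_rts_before_send hrtimer, which leaves serial8250_start_tx() with a single __start_tx() call site. A condensed, runnable model of that control flow (the timer machinery is stubbed):

    #include <stdbool.h>
    #include <stdio.h>

    static bool start_timer_armed;

    static void arm_start_timer(void) { start_timer_armed = true; }
    static void do_start_tx(void)     { puts("TX started"); }

    /* Returns false if TX start was deferred to the RTS-before-send timer. */
    static bool start_tx_rs485(bool need_rts_delay)
    {
            if (need_rts_delay) {
                    arm_start_timer();
                    return false;
            }
            return true;
    }

    static void start_tx(bool em485, bool need_rts_delay)
    {
            if (em485 && (start_timer_armed || !start_tx_rs485(need_rts_delay)))
                    return;
            do_start_tx(); /* the one remaining call site */
    }

    int main(void)
    {
            start_tx(true, true);   /* deferred: nothing printed */
            start_tx(false, false); /* prints "TX started" */
            return 0;
    }
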
@@ -1729,7 +1714,7 @@ static void serial8250_enable_ms(struct uart_port *port)
serial8250_rpm_put(up);
}
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
unsigned char ch;
@@ -1792,11 +1777,13 @@ void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
EXPORT_SYMBOL_GPL(serial8250_read_char);
/*
- * serial8250_rx_chars: processes according to the passed in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
+ * serial8250_rx_chars - Read characters. The first LSR value must be passed in.
+ *
+ * Returns LSR bits. The caller should rely only on non-Rx related LSR bits
+ * (such as THRE) because the LSR value might come from an already consumed
+ * character.
*/
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
int max_count = 256;
@@ -1852,7 +1839,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
break;
if ((up->capabilities & UART_CAP_HFIFO) &&
- (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ !uart_lsr_tx_empty(serial_in(up, UART_LSR)))
break;
/* The BCM2835 MINI UART THRE bit is really a not-full bit. */
if ((up->capabilities & UART_CAP_MINI) &&
@@ -1916,17 +1903,17 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
*/
int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned char status;
struct uart_8250_port *up = up_to_u8250p(port);
bool skip_rx = false;
unsigned long flags;
+ u16 status;
if (iir & UART_IIR_NO_INT)
return 0;
spin_lock_irqsave(&port->lock, flags);
- status = serial_port_in(port, UART_LSR);
+ status = serial_lsr_in(up);
/*
* If port is stopped and there are no error conditions in the
@@ -2002,18 +1989,17 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned int lsr;
+ u16 lsr;
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
@@ -2084,9 +2070,7 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
/* Wait up to 10ms for the character(s) to be sent. */
for (;;) {
- status = serial_in(up, UART_LSR);
-
- up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+ status = serial_lsr_in(up);
if ((status & bits) == bits)
break;
@@ -2128,8 +2112,8 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
static int serial8250_get_poll_char(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char lsr;
int status;
+ u16 lsr;
serial8250_rpm_get(up);
@@ -2163,7 +2147,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
else
serial_port_out(port, UART_IER, 0);
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
* Send the character out.
*/
@@ -2173,7 +2157,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
serial_port_out(port, UART_IER, ier);
serial8250_rpm_put(up);
}
@@ -2184,8 +2168,9 @@ int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned char lsr, iir;
+ unsigned char iir;
int retval;
+ u16 lsr;
if (!port->fifosize)
port->fifosize = uart_config[port->type].fifo_size;
@@ -2810,7 +2795,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask |= UART_LSR_BI;
/*
- * Characteres to ignore
+ * Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
@@ -3203,7 +3188,7 @@ static void serial8250_config_port(struct uart_port *port, int flags)
autoconfig(up);
if (port->rs485.flags & SER_RS485_ENABLED)
- port->rs485_config(port, &port->rs485);
+ uart_rs485_config(port);
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
@@ -3445,7 +3430,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fdb6c4188695..d0b49e15fbf5 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -254,6 +254,7 @@ config SERIAL_8250_ASPEED_VUART
depends on SERIAL_8250
depends on OF
depends on REGMAP && MFD_SYSCON
+ depends on ARCH_ASPEED || COMPILE_TEST
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7172cd1792df..877173907c53 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -324,6 +324,7 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
@@ -889,23 +890,6 @@ config SERIAL_TXX9_STDSERIAL
bool "TX39XX/49XX SIO act as standard serial"
depends on !SERIAL_8250 && SERIAL_TXX9
-config SERIAL_VR41XX
- tristate "NEC VR4100 series Serial Interface Unit support"
- depends on CPU_VR41XX
- select SERIAL_CORE
- help
- If you have a NEC VR4100 series processor and you want to use
- Serial Interface Unit(SIU) or Debug Serial Interface Unit(DSIU)
- (not include VR4111/VR4121 DSIU), say Y. Otherwise, say N.
-
-config SERIAL_VR41XX_CONSOLE
- bool "Enable NEC VR4100 series Serial Interface Unit console"
- depends on SERIAL_VR41XX=y
- select SERIAL_CORE_CONSOLE
- help
- If you have a NEC VR4100 series processor and you want to use
- a console on a serial port, say Y. Otherwise, say N.
-
config SERIAL_JSM
tristate "Digi International NEO and Classic PCI Support"
depends on PCI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 61cc8de95571..238a9557b487 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
-obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 16a21422ddce..15f0e4d88c5a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2214,7 +2214,7 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
-static int pl011_rs485_config(struct uart_port *port,
+static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_amba_port *uap =
@@ -2700,17 +2700,12 @@ static int pl011_find_free_port(void)
static int pl011_get_rs485_mode(struct uart_amba_port *uap)
{
struct uart_port *port = &uap->port;
- struct serial_rs485 *rs485 = &port->rs485;
int ret;
ret = uart_get_rs485_mode(port);
if (ret)
return ret;
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
return 0;
}
@@ -2770,6 +2765,13 @@ static int pl011_register_port(struct uart_amba_port *uap)
return ret;
}
+static const struct serial_rs485 pl011_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -2796,6 +2798,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.irq = dev->irq[0];
uap->port.ops = &amba_pl011_pops;
uap->port.rs485_config = pl011_rs485_config;
+ uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 6269dbf93546..32caeac12985 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -580,18 +580,9 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
-static int ar933x_config_rs485(struct uart_port *port,
+static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
- struct ar933x_uart_port *up =
- container_of(port, struct ar933x_uart_port, port);
-
- if ((rs485conf->flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(port->dev, "RS485 needs rts-gpio\n");
- return 1;
- }
- port->rs485 = *rs485conf;
return 0;
}
@@ -702,6 +693,11 @@ static struct uart_driver ar933x_uart_driver = {
.cons = NULL, /* filled in runtime */
};
+static const struct serial_rs485 ar933x_no_rs485 = {};
+static const struct serial_rs485 ar933x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static int ar933x_uart_probe(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
@@ -773,6 +769,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
port->rs485_config = ar933x_config_rs485;
+ port->rs485_supported = ar933x_rs485_supported;
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
@@ -792,10 +789,12 @@ static int ar933x_uart_probe(struct platform_device *pdev)
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
- port->rs485.flags &= ~SER_RS485_ENABLED;
+ if (!up->rts_gpiod) {
+ port->rs485_supported = ar933x_no_rs485;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dd1c7e4bd1c9..7450d3853031 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,7 +166,6 @@ struct atmel_uart_port {
unsigned int fidi_min;
unsigned int fidi_max;
-#ifdef CONFIG_PM
struct {
u32 cr;
u32 mr;
@@ -177,7 +176,6 @@ struct atmel_uart_port {
u32 fmr;
u32 fimr;
} cache;
-#endif
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
@@ -285,7 +283,7 @@ static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
}
/* Enable or disable the rs485 support */
-static int atmel_config_rs485(struct uart_port *port,
+static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -296,9 +294,6 @@ static int atmel_config_rs485(struct uart_port *port,
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (rs485conf->flags & SER_RS485_RX_DURING_TX)
@@ -308,6 +303,7 @@ static int atmel_config_rs485(struct uart_port *port,
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -2473,6 +2469,12 @@ static const struct uart_ops atmel_pops = {
#endif
};
+static const struct serial_rs485 atmel_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/*
* Configure the port from the platform device resource info.
*/
@@ -2494,6 +2496,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->mapbase = mpdev->resource[0].start;
port->irq = platform_get_irq(mpdev, 0);
port->rs485_config = atmel_config_rs485;
+ port->rs485_supported = atmel_rs485_supported;
port->iso7816_config = atmel_config_iso7816;
port->membase = NULL;
@@ -2503,24 +2506,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
if (ret)
return ret;
- /* for console, the clock could already be configured */
- if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&mpdev->dev, "usart");
- if (IS_ERR(atmel_port->clk)) {
- ret = PTR_ERR(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- ret = clk_prepare_enable(atmel_port->clk);
- if (ret) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- port->uartclk = clk_get_rate(atmel_port->clk);
- clk_disable_unprepare(atmel_port->clk);
- /* only enable clock when USART is in use */
- }
+ port->uartclk = clk_get_rate(atmel_port->clk);
/*
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
@@ -2629,7 +2615,6 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
static int __init atmel_console_setup(struct console *co, char *options)
{
- int ret;
struct uart_port *port = &atmel_ports[co->index].uart;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int baud = 115200;
@@ -2642,10 +2627,6 @@ static int __init atmel_console_setup(struct console *co, char *options)
return -ENODEV;
}
- ret = clk_prepare_enable(atmel_ports[co->index].clk);
- if (ret)
- return ret;
-
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
@@ -2711,7 +2692,6 @@ static struct uart_driver atmel_uart = {
.cons = ATMEL_CONSOLE_DEVICE,
};
-#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
@@ -2721,10 +2701,9 @@ static bool atmel_serial_clk_will_stop(void)
#endif
}
-static int atmel_serial_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused atmel_serial_suspend(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (uart_console(port) && console_suspend_enabled) {
@@ -2749,14 +2728,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
}
/* we can not wake up if we're running on slow clock */
- atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
+ atmel_port->may_wakeup = device_may_wakeup(dev);
if (atmel_serial_clk_will_stop()) {
unsigned long flags;
spin_lock_irqsave(&atmel_port->lock_suspended, flags);
atmel_port->suspended = true;
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
- device_set_wakeup_enable(&pdev->dev, 0);
+ device_set_wakeup_enable(dev, 0);
}
uart_suspend_port(&atmel_uart, port);
@@ -2764,9 +2743,9 @@ static int atmel_serial_suspend(struct platform_device *pdev,
return 0;
}
-static int atmel_serial_resume(struct platform_device *pdev)
+static int __maybe_unused atmel_serial_resume(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
@@ -2801,14 +2780,10 @@ static int atmel_serial_resume(struct platform_device *pdev)
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
uart_resume_port(&atmel_uart, port);
- device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
+ device_set_wakeup_enable(dev, atmel_port->may_wakeup);
return 0;
}
-#else
-#define atmel_serial_suspend NULL
-#define atmel_serial_resume NULL
-#endif
static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
@@ -2897,14 +2872,23 @@ static int atmel_serial_probe(struct platform_device *pdev)
atomic_set(&atmel_port->tasklet_shutdown, 0);
spin_lock_init(&atmel_port->lock_suspended);
+ atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
+ if (IS_ERR(atmel_port->clk)) {
+ ret = PTR_ERR(atmel_port->clk);
+ goto err;
+ }
+ ret = clk_prepare_enable(atmel_port->clk);
+ if (ret)
+ goto err;
+
ret = atmel_init_port(atmel_port, pdev);
if (ret)
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
if (IS_ERR(atmel_port->gpios)) {
ret = PTR_ERR(atmel_port->gpios);
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
}
if (!atmel_use_pdc_rx(&atmel_port->uart)) {
@@ -2913,7 +2897,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
sizeof(struct atmel_uart_char),
GFP_KERNEL);
if (!data)
- goto err_alloc_ring;
+ goto err_clk_disable_unprepare;
atmel_port->rx_ring.buf = data;
}
@@ -2923,26 +2907,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err_add_port;
-#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
- if (uart_console(&atmel_port->uart)
- && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
- /*
- * The serial core enabled the clock for us, so undo
- * the clk_prepare_enable() in atmel_console_setup()
- */
- clk_disable_unprepare(atmel_port->clk);
- }
-#endif
-
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, atmel_port);
- /*
- * The peripheral clock has been disabled by atmel_init_port():
- * enable it before accessing I/O registers
- */
- clk_prepare_enable(atmel_port->clk);
-
if (rs485_enabled) {
atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
ATMEL_US_USMODE_NORMAL);
@@ -2966,12 +2933,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
err_add_port:
kfree(atmel_port->rx_ring.buf);
atmel_port->rx_ring.buf = NULL;
-err_alloc_ring:
- if (!uart_console(&atmel_port->uart)) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- }
-err_clear_bit:
+err_clk_disable_unprepare:
+ clk_disable_unprepare(atmel_port->clk);
clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
return ret;
@@ -3005,21 +2968,21 @@ static int atmel_serial_remove(struct platform_device *pdev)
clear_bit(port->line, atmel_ports_in_use);
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
pdev->dev.of_node = NULL;
return ret;
}
+static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
+ atmel_serial_resume);
+
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = atmel_serial_remove,
- .suspend = atmel_serial_suspend,
- .resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
+ .pm = pm_ptr(&atmel_serial_pm_ops),
},
};
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 57c70851f22a..88d08ba1ca83 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -253,6 +253,9 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
bool big_endian;
u64 addr;
+ if (early_con.flags & CON_ENABLED)
+ return -EALREADY;
+
spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
addr = of_flat_dt_translate_address(node);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 0d6e62f6bb07..b20f6f2fa51c 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -274,6 +274,8 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+	bool is_cs7; /* Set to true when character size is 7 and parity is enabled */
};
struct lpuart_soc_data {
@@ -990,12 +992,12 @@ static void lpuart32_rxint(struct lpuart_port *sport)
if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
if (sr & UARTSTAT_PE) {
+ sport->port.icount.parity++;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
sport->port.icount.brk++;
else
- sport->port.icount.parity++;
- } else if (sr & UARTSTAT_FE) {
- sport->port.icount.frame++;
+ sport->port.icount.frame++;
}
if (sr & UARTSTAT_OR)
@@ -1010,18 +1012,21 @@ static void lpuart32_rxint(struct lpuart_port *sport)
sr &= sport->port.read_status_mask;
if (sr & UARTSTAT_PE) {
+ flg = TTY_PARITY;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
flg = TTY_BREAK;
else
- flg = TTY_PARITY;
- } else if (sr & UARTSTAT_FE) {
- flg = TTY_FRAME;
+ flg = TTY_FRAME;
}
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
}
+ if (sport->is_cs7)
+ rx &= 0x7F;
+
if (tty_insert_flip_char(port, rx, flg) == 0)
sport->port.icount.buf_overrun++;
}
@@ -1107,6 +1112,17 @@ static void lpuart_handle_sysrq(struct lpuart_port *sport)
}
}
+static int lpuart_tty_insert_flip_string(struct tty_port *port,
+ unsigned char *chars, size_t size, bool is_cs7)
+{
+ int i;
+
+ if (is_cs7)
+ for (i = 0; i < size; i++)
+ chars[i] &= 0x7F;
+ return tty_insert_flip_string(port, chars, size);
+}
+
static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
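
Context for the helper above: with 7-bit characters and parity enabled, the LPUART receiver leaves the parity bit in bit 7 of each stored byte, so the DMA receive path must strip it in bulk before handing the buffer to the tty layer. A standalone illustration of the masking:

    #include <assert.h>
    #include <stddef.h>

    /* Strip the parity bit LPUART leaves in bit 7 when running
     * 7-bit characters with parity enabled (CS7 + PARENB). */
    static void mask_cs7(unsigned char *chars, size_t n, int is_cs7)
    {
            size_t i;

            if (!is_cs7)
                    return;
            for (i = 0; i < n; i++)
                    chars[i] &= 0x7F;
    }

    int main(void)
    {
            unsigned char buf[] = { 0xC1, 0x41 }; /* 'A' with/without parity bit */

            mask_cs7(buf, sizeof(buf), 1);
            assert(buf[0] == 0x41 && buf[1] == 0x41);
            return 0;
    }
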
@@ -1217,7 +1233,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
if (ring->head < ring->tail) {
count = sport->rx_sgl.length - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
ring->tail = 0;
@@ -1227,7 +1244,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
/* Finally we read data from tail to head */
if (ring->tail < ring->head) {
count = ring->head - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
/* Wrap ring->head if needed */
@@ -1355,7 +1373,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
sport->dma_rx_cookie = -EINVAL;
}
-static int lpuart_config_rs485(struct uart_port *port,
+static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1365,11 +1383,6 @@ static int lpuart_config_rs485(struct uart_port *port,
~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
writeb(modem, sport->port.membase + UARTMODEM);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -1381,16 +1394,16 @@ static int lpuart_config_rs485(struct uart_port *port,
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
writeb(modem, sport->port.membase + UARTMODEM);
return 0;
}
-static int lpuart32_config_rs485(struct uart_port *port,
+static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1400,11 +1413,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
& ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
lpuart32_write(&sport->port, modem, UARTMODIR);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -2076,6 +2084,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
bd = lpuart32_read(&sport->port, UARTBAUD);
modem = lpuart32_read(&sport->port, UARTMODIR);
+ sport->is_cs7 = false;
/*
* only CS8 and CS7 are supported; CS7 additionally requires PE (parity enable).
* supported mode:
@@ -2182,6 +2191,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
/* wait for the transmit engine to complete */
+ lpuart32_write(&sport->port, 0, UARTMODIR);
lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
@@ -2194,6 +2204,9 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, ctrl, UARTCTRL);
/* restore control register */
+ if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ sport->is_cs7 = true;
+
if (old && sport->lpuart_dma_rx_use) {
if (!lpuart_start_rx_dma(sport))
rx_dma_timer_init(sport);
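The rx &= 0x7F strip above exists because the LPUART data register always returns eight bits; in 7-bit-with-parity mode the top bit carries the parity bit rather than data. A standalone worked example of the masking (not driver code; odd parity assumed for illustration):

	#include <stdio.h>

	int main(void)
	{
		/*
		 * 'A' (0x41) sent as CS7 with odd parity arrives as 0xC1:
		 * bit 7 holds the parity bit, bits 6..0 hold the data.
		 */
		unsigned char rx = 0xC1;

		rx &= 0x7F;	/* the same strip the driver applies when is_cs7 */
		printf("0x%02X -> '%c'\n", rx, rx);	/* prints 0x41 -> 'A' */
		return 0;
	}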
@@ -2621,6 +2634,11 @@ static struct uart_driver lpuart_reg = {
.cons = LPUART_CONSOLE,
};
+static const struct serial_rs485 lpuart_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ /* delay_rts_* and RX_DURING_TX are not supported */
+};
+
static int lpuart_probe(struct platform_device *pdev)
{
const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
@@ -2660,6 +2678,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config = lpuart32_config_rs485;
else
sport->port.rs485_config = lpuart_config_rs485;
+ sport->port.rs485_supported = lpuart_rs485_supported;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2717,14 +2736,7 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_get_rs485;
- if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
- dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
-
- if (sport->port.rs485.delay_rts_before_send ||
- sport->port.rs485.delay_rts_after_send)
- dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
-
- sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
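With rs485_supported declared and uart_rs485_config() doing the setup, the open-coded clamping that this patch deletes from lpuart_config_rs485() moves into the serial core. A minimal sketch of that core-side sanitation, assuming it works along the lines of the uart_sanitize_serial_rs485() helper introduced around this series (paraphrased, not the verbatim core code):

	/* Sketch: clamp a user-supplied config against what the port declares. */
	static void sanitize_rs485(struct uart_port *port, struct serial_rs485 *rs485)
	{
		rs485->flags &= port->rs485_supported.flags;

		/* Zero the delays unless the driver advertised delay support. */
		if (!port->rs485_supported.delay_rts_before_send)
			rs485->delay_rts_before_send = 0;
		if (!port->rs485_supported.delay_rts_after_send)
			rs485->delay_rts_after_send = 0;
	}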
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 30edb35a6a15..522445a8f666 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1907,16 +1907,12 @@ static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
#endif
/* called with port.lock taken and irqs off or from .probe without locking */
-static int imx_uart_rs485_config(struct uart_port *port,
+static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr2;
- /* RTS is required to control the transmitter */
- if (!sport->have_rtscts && !sport->have_rtsgpio)
- rs485conf->flags &= ~SER_RS485_ENABLED;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
/* Enable receiver if low-active RTS signal is requested */
if (sport->have_rtscts && !sport->have_rtsgpio &&
@@ -2200,6 +2196,14 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
return HRTIMER_NORESTART;
}
+static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
+static const struct serial_rs485 imx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/* Default RX DMA buffer configuration */
#define RX_DMA_PERIODS 16
#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
@@ -2279,6 +2283,11 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
+ /* RTS is required to control the RS485 transmitter */
+ if (sport->have_rtscts || sport->have_rtsgpio)
+ sport->port.rs485_supported = imx_rs485_supported;
+ else
+ sport->port.rs485_supported = imx_no_rs485;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_uart_timeout, 0);
@@ -2338,7 +2347,7 @@ static int imx_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- imx_uart_rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
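The flags and delays being declared here are the ones userspace hands in through the TIOCSRS485 ioctl. For reference, a small self-contained usage example (the device path is hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_rs485 rs485 = {
			.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
			.delay_rts_before_send = 1,	/* ms; honored only if supported */
		};
		int fd = open("/dev/ttymxc1", O_RDWR);	/* hypothetical i.MX UART */

		if (fd < 0 || ioctl(fd, TIOCSRS485, &rs485) < 0) {
			perror("rs485");
			return 1;
		}
		return 0;
	}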
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 79b7db8580e0..7aa37be3216a 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -342,7 +342,7 @@ static int param_set_kgdboc_var(const char *kmessage,
/*
* Configure with the new params as long as init already ran.
* Note that we can get called before init if someone loads us
- * with "modprobe kgdboc kgdboc=..." or if they happen to use the
+ * with "modprobe kgdboc kgdboc=..." or if they happen to use
* the odd syntax of "kgdboc.kgdboc=..." on the kernel command line.
*/
if (configured >= 0)
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index a0b6ea52d133..ab10ca4a45b5 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
@@ -72,7 +73,8 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@@ -245,7 +247,17 @@
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
+struct max310x_if_cfg {
+ int (*extended_reg_enable)(struct device *dev, bool enable);
+
+ unsigned int rev_id_reg;
+};
+
struct max310x_devtype {
+ struct {
+ unsigned short min;
+ unsigned short max;
+ } slave_addr;
char name[9];
int nr;
u8 mode1;
@@ -258,9 +270,8 @@ struct max310x_one {
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
+ struct regmap *regmap;
- u8 wr_header;
- u8 rd_header;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
@@ -268,6 +279,7 @@ struct max310x_one {
struct max310x_port {
const struct max310x_devtype *devtype;
+ const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@@ -289,26 +301,26 @@ static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
- regmap_read(s->regmap, port->iobase + reg, &val);
+ regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_write(s->regmap, port->iobase + reg, val);
+ regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
+ regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
@@ -357,13 +369,12 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -388,13 +399,12 @@ static int max14830_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -419,6 +429,10 @@ static const struct max310x_devtype max3107_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x2c,
+ .max = 0x2f,
+ },
};
static const struct max310x_devtype max3108_devtype = {
@@ -427,6 +441,10 @@ static const struct max310x_devtype max3108_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max3109_devtype = {
@@ -435,6 +453,10 @@ static const struct max310x_devtype max3109_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max14830_devtype = {
@@ -443,11 +465,15 @@ static const struct max310x_devtype max14830_devtype = {
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -464,7 +490,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@@ -486,7 +512,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -624,31 +650,15 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->wr_header,
- .len = sizeof(one->wr_header),
- }, {
- .tx_buf = txbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->rd_header,
- .len = sizeof(one->rd_header),
- }, {
- .rx_buf = rxbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
@@ -1026,7 +1036,7 @@ static void max310x_rs_proc(struct work_struct *ws)
MAX310X_MODE2_ECHOSUPR_BIT, mode2);
}
-static int max310x_rs485_config(struct uart_port *port,
+static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct max310x_one *one = to_max310x_port(port);
@@ -1035,8 +1045,6 @@ static int max310x_rs485_config(struct uart_port *port,
(rs485->delay_rts_after_send > 0x0f))
return -ERANGE;
- rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
- SER_RS485_ENABLED;
port->rs485 = *rs485;
schedule_work(&one->rs_work);
@@ -1249,16 +1257,24 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
#endif
+static const struct serial_rs485 max310x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
- struct regmap *regmap, int irq)
+ const struct max310x_if_cfg *if_cfg,
+ struct regmap *regmaps[], int irq)
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
u32 uartclk = 0;
bool xtal;
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ for (i = 0; i < devtype->nr; i++)
+ if (IS_ERR(regmaps[i]))
+ return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
@@ -1305,8 +1321,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
}
- s->regmap = regmap;
+ s->regmap = regmaps[0];
s->devtype = devtype;
+ s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
@@ -1315,22 +1332,18 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
- unsigned int offs = i << 5;
-
/* Reset port */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
+ regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
+ regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
- regmap_read(s->regmap,
- MAX310X_BRGDIVLSB_REG + offs, &ret);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
} while (ret != 0x01);
- regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
- devtype->mode1);
+ regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
@@ -1353,11 +1366,14 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
- s->p[i].port.iobase = i * 0x20;
+ s->p[i].port.iobase = i;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
+ s->p[i].port.rs485_supported = max310x_rs485_supported;
s->p[i].port.ops = &max310x_ops;
+ s->p[i].regmap = regmaps[i];
+
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
@@ -1368,10 +1384,6 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
- /* Initialize SPI-transfer buffers */
- s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
- MAX310X_WRITE_BIT;
- s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
@@ -1456,16 +1468,31 @@ static struct regmap_config regcfg = {
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
+ .max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
};
#ifdef CONFIG_SPI_MASTER
+static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+
+ return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
+}
+
+static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
+ .extended_reg_enable = max310x_spi_extended_reg_enable,
+ .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+};
+
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
- struct regmap *regmap;
+ struct regmap *regmaps[4];
+ unsigned int i;
int ret;
/* Setup SPI bus */
@@ -1480,10 +1507,14 @@ static int max310x_spi_probe(struct spi_device *spi)
if (!devtype)
devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
- regcfg.max_register = devtype->nr * 0x20 - 1;
- regmap = devm_regmap_init_spi(spi, &regcfg);
+ for (i = 0; i < devtype->nr; i++) {
+ u8 port_mask = i * 0x20;
+ regcfg.read_flag_mask = port_mask;
+ regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
+ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
+ }
- return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
+ return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
static void max310x_spi_remove(struct spi_device *spi)
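These per-port regmaps fold each port's register-window offset into the SPI flag masks rather than into every register address, which is what lets the register callbacks above drop their reg & 0x1f masking. A sketch of the wire framing this produces (consistent with the old code's 5-bit register field; register 0x09 is just an example value):

	/* Sketch: how a write to register 0x09 of port 2 is framed on the wire. */
	#define WRITE_BIT	0x80	/* stands in for MAX310X_WRITE_BIT */

	static unsigned char wire_byte(unsigned int port, unsigned char reg, int write)
	{
		unsigned char port_mask = port * 0x20;	/* matches the regcfg flag masks */

		return (reg & 0x1f) | port_mask | (write ? WRITE_BIT : 0);
	}
	/* wire_byte(2, 0x09, 1) == 0xc9: write bit | port-2 window | register 0x09 */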
@@ -1512,6 +1543,97 @@ static struct spi_driver max310x_spi_driver = {
};
#endif
+#ifdef CONFIG_I2C
+static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
+{
+ return 0;
+}
+
+static struct regmap_config regcfg_i2c = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+ .max_register = MAX310X_I2C_REVID_EXTREG,
+};
+
+static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+ .extended_reg_enable = max310x_i2c_extended_reg_enable,
+ .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+};
+
+static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+ unsigned int nr)
+{
+ /*
+ * For MAX14830 and MAX3109, the slave address depends on what the
+ * A0 and A1 pins are tied to.
+ * See the I2C Address Map table in the datasheet.
+ * From that table, the following offsets between consecutive UART
+ * addresses were derived:
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
+ */
+
+ addr -= nr * 0x10;
+
+ if (nr >= 2)
+ addr -= 0x20;
+
+ return addr;
+}
+
+static int max310x_i2c_probe(struct i2c_client *client)
+{
+ const struct max310x_devtype *devtype =
+ device_get_match_data(&client->dev);
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
+ client->addr, devtype->slave_addr.min,
+ devtype->slave_addr.max);
+
+ regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
+
+ for (i = 1; i < devtype->nr; i++) {
+ port_addr = max310x_i2c_slave_addr(client->addr, i);
+ port_client = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ port_addr);
+
+ regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
+ }
+
+ return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
+ regmaps, client->irq);
+}
+
+static int max310x_i2c_remove(struct i2c_client *client)
+{
+ max310x_remove(&client->dev);
+
+ return 0;
+}
+
+static struct i2c_driver max310x_i2c_driver = {
+ .driver = {
+ .name = MAX310X_NAME,
+ .of_match_table = max310x_dt_ids,
+ .pm = &max310x_pm_ops,
+ },
+ .probe_new = max310x_i2c_probe,
+ .remove = max310x_i2c_remove,
+};
+#endif
+
static int __init max310x_uart_init(void)
{
int ret;
@@ -1525,15 +1647,35 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
- uart_unregister_driver(&max310x_uart);
+ goto err_spi_register;
+#endif
+
+#ifdef CONFIG_I2C
+ ret = i2c_add_driver(&max310x_i2c_driver);
+ if (ret)
+ goto err_i2c_register;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_I2C
+err_i2c_register:
+ spi_unregister_driver(&max310x_spi_driver);
#endif
+err_spi_register:
+ uart_unregister_driver(&max310x_uart);
+
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
+#ifdef CONFIG_I2C
+ i2c_del_driver(&max310x_i2c_driver);
+#endif
+
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif
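To make the max310x_i2c_slave_addr() arithmetic concrete, here is a standalone worked example; the 0x6c base address is hypothetical, chosen inside the MAX14830 0x60..0x6f window:

	#include <stdio.h>

	static unsigned short slave_addr(unsigned short base, unsigned int nr)
	{
		unsigned short addr = base - nr * 0x10;

		if (nr >= 2)
			addr -= 0x20;
		return addr;
	}

	int main(void)
	{
		unsigned int nr;

		for (nr = 0; nr < 4; nr++)	/* UART0..UART3 of a MAX14830 */
			printf("UART%u: 0x%02x\n", nr, slave_addr(0x6c, nr));
		/* Prints 0x6c, 0x5c, 0x2c, 0x1c: gaps of 0x10, 0x30, 0x10. */
		return 0;
	}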
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 2aec62b5d6c4..f4aaaadd0742 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -431,7 +431,8 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
/* Enable or disable the RS485 support */
-static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+static int mcf_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
unsigned char mr1, mr2;
@@ -448,11 +449,14 @@ static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
}
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
- port->rs485 = *rs485;
return 0;
}
+static const struct serial_rs485 mcf_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+};
+
/****************************************************************************/
/*
@@ -502,6 +506,7 @@ int __init early_mcf_setup(struct mcf_platform_uart *platp)
port->uartclk = MCF_BUSCLK;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->ops = &mcf_uart_ops;
}
@@ -629,6 +634,7 @@ static int mcf_probe(struct platform_device *pdev)
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE);
uart_add_one_port(&mcf_driver, port);
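The dropped port->rs485 = *rs485 assignment follows the same convention change seen in the drivers above: once a port declares rs485_supported, the serial core sanitizes the user's struct serial_rs485 and stores it in port->rs485 itself around the rs485_config() call, so the driver only programs the hardware. A sketch of the resulting core-side flow (names paraphrased; sanitize_rs485() refers to the sketch after the fsl_lpuart diff above):

	/* Sketch: what the core now does for a TIOCSRS485 request. */
	static int core_set_rs485(struct uart_port *port, struct ktermios *termios,
				  struct serial_rs485 *rs485)
	{
		int ret;

		sanitize_rs485(port, rs485);		/* clamp to rs485_supported */
		ret = port->rs485_config(port, termios, rs485);
		if (!ret)
			port->rs485 = *rs485;		/* stored by the core, not the driver */
		return ret;
	}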
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 4869c0059c98..6c8db19fd572 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -162,7 +162,7 @@ static void meson_uart_start_tx(struct uart_port *port)
ch = xmit->buf[xmit->tail];
writel(ch, port->membase + AML_UART_WFIFO);
- xmit->tail = (xmit->tail+1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
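Beyond the constant rename, the index math here depends on the xmit buffer size being a power of two: the & (UART_XMIT_SIZE - 1) mask is a cheap modulo. A tiny standalone demonstration:

	#include <stdio.h>

	#define XMIT_SIZE 4096	/* a power of two, like UART_XMIT_SIZE */

	int main(void)
	{
		unsigned int tail = XMIT_SIZE - 1;	/* last slot in the ring */

		tail = (tail + 1) & (XMIT_SIZE - 1);	/* wraps to 0 without a division */
		printf("%u\n", tail);			/* prints 0 */
		return 0;
	}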
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index e50f069b5ebb..3f1986c89694 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1630,7 +1630,7 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- uartclk = mpc5xxx_get_bus_frequency(np);
+ uartclk = mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1747,7 +1747,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
/* set the uart clock to the input clock of the psc, the different
* prescalers are taken into account in the set_baudrate() methods
* of the respective chip */
- uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ uartclk = mpc5xxx_get_bus_frequency(&op->dev);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
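The two call sites illustrate the split behind the new helper: the probe path has a struct device to hand over, while the console-setup path only has a device_node and therefore wraps it with of_fwnode_handle(). A sketch of the pattern (helper names as used above; not a complete driver excerpt):

	#include <linux/of.h>
	#include <asm/mpc5xxx.h>

	static unsigned int get_uartclk(struct device *dev, struct device_node *np)
	{
		if (dev)	/* probe path: a full device is available */
			return mpc5xxx_get_bus_frequency(dev);

		/* console path: only an OF node, so wrap it as a fwnode handle */
		return mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
	}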
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e676ec761f18..3159889ddae1 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -29,103 +29,103 @@
#include <linux/of_device.h>
#include <linux/wait.h>
-#define UART_MR1 0x0000
-
-#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
-#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
-#define UART_MR1_RX_RDY_CTL BIT(7)
-#define UART_MR1_CTS_CTL BIT(6)
-
-#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE BIT(6)
-#define UART_MR2_BITS_PER_CHAR 0x30
-#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
-#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
-#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
-#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
-#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
-#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
-#define UART_MR2_PARITY_MODE_NONE 0x0
-#define UART_MR2_PARITY_MODE_ODD 0x1
-#define UART_MR2_PARITY_MODE_EVEN 0x2
-#define UART_MR2_PARITY_MODE_SPACE 0x3
-#define UART_MR2_PARITY_MODE 0x3
-
-#define UART_CSR 0x0008
-
-#define UART_TF 0x000C
+#define MSM_UART_MR1 0x0000
+
+#define MSM_UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define MSM_UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define MSM_UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define MSM_UART_MR1_RX_RDY_CTL BIT(7)
+#define MSM_UART_MR1_CTS_CTL BIT(6)
+
+#define MSM_UART_MR2 0x0004
+#define MSM_UART_MR2_ERROR_MODE BIT(6)
+#define MSM_UART_MR2_BITS_PER_CHAR 0x30
+#define MSM_UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define MSM_UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define MSM_UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define MSM_UART_MR2_PARITY_MODE_NONE 0x0
+#define MSM_UART_MR2_PARITY_MODE_ODD 0x1
+#define MSM_UART_MR2_PARITY_MODE_EVEN 0x2
+#define MSM_UART_MR2_PARITY_MODE_SPACE 0x3
+#define MSM_UART_MR2_PARITY_MODE 0x3
+
+#define MSM_UART_CSR 0x0008
+
+#define MSM_UART_TF 0x000C
#define UARTDM_TF 0x0070
-#define UART_CR 0x0010
-#define UART_CR_CMD_NULL (0 << 4)
-#define UART_CR_CMD_RESET_RX (1 << 4)
-#define UART_CR_CMD_RESET_TX (2 << 4)
-#define UART_CR_CMD_RESET_ERR (3 << 4)
-#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
-#define UART_CR_CMD_START_BREAK (5 << 4)
-#define UART_CR_CMD_STOP_BREAK (6 << 4)
-#define UART_CR_CMD_RESET_CTS (7 << 4)
-#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
-#define UART_CR_CMD_PACKET_MODE (9 << 4)
-#define UART_CR_CMD_MODE_RESET (12 << 4)
-#define UART_CR_CMD_SET_RFR (13 << 4)
-#define UART_CR_CMD_RESET_RFR (14 << 4)
-#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
-#define UART_CR_CMD_FORCE_STALE (4 << 8)
-#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE BIT(3)
-#define UART_CR_TX_ENABLE BIT(2)
-#define UART_CR_RX_DISABLE BIT(1)
-#define UART_CR_RX_ENABLE BIT(0)
-#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
-
-#define UART_IMR 0x0014
-#define UART_IMR_TXLEV BIT(0)
-#define UART_IMR_RXSTALE BIT(3)
-#define UART_IMR_RXLEV BIT(4)
-#define UART_IMR_DELTA_CTS BIT(5)
-#define UART_IMR_CURRENT_CTS BIT(6)
-#define UART_IMR_RXBREAK_START BIT(10)
-
-#define UART_IPR_RXSTALE_LAST 0x20
-#define UART_IPR_STALE_LSB 0x1F
-#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
-#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
-
-#define UART_IPR 0x0018
-#define UART_TFWR 0x001C
-#define UART_RFWR 0x0020
-#define UART_HCR 0x0024
-
-#define UART_MREG 0x0028
-#define UART_NREG 0x002C
-#define UART_DREG 0x0030
-#define UART_MNDREG 0x0034
-#define UART_IRDA 0x0038
-#define UART_MISR_MODE 0x0040
-#define UART_MISR_RESET 0x0044
-#define UART_MISR_EXPORT 0x0048
-#define UART_MISR_VAL 0x004C
-#define UART_TEST_CTRL 0x0050
-
-#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR BIT(7)
-#define UART_SR_RX_BREAK BIT(6)
-#define UART_SR_PAR_FRAME_ERR BIT(5)
-#define UART_SR_OVERRUN BIT(4)
-#define UART_SR_TX_EMPTY BIT(3)
-#define UART_SR_TX_READY BIT(2)
-#define UART_SR_RX_FULL BIT(1)
-#define UART_SR_RX_READY BIT(0)
-
-#define UART_RF 0x000C
+#define MSM_UART_CR 0x0010
+#define MSM_UART_CR_CMD_NULL (0 << 4)
+#define MSM_UART_CR_CMD_RESET_RX (1 << 4)
+#define MSM_UART_CR_CMD_RESET_TX (2 << 4)
+#define MSM_UART_CR_CMD_RESET_ERR (3 << 4)
+#define MSM_UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define MSM_UART_CR_CMD_START_BREAK (5 << 4)
+#define MSM_UART_CR_CMD_STOP_BREAK (6 << 4)
+#define MSM_UART_CR_CMD_RESET_CTS (7 << 4)
+#define MSM_UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define MSM_UART_CR_CMD_PACKET_MODE (9 << 4)
+#define MSM_UART_CR_CMD_MODE_RESET (12 << 4)
+#define MSM_UART_CR_CMD_SET_RFR (13 << 4)
+#define MSM_UART_CR_CMD_RESET_RFR (14 << 4)
+#define MSM_UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define MSM_UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define MSM_UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define MSM_UART_CR_CMD_FORCE_STALE (4 << 8)
+#define MSM_UART_CR_CMD_RESET_TX_READY (3 << 8)
+#define MSM_UART_CR_TX_DISABLE BIT(3)
+#define MSM_UART_CR_TX_ENABLE BIT(2)
+#define MSM_UART_CR_RX_DISABLE BIT(1)
+#define MSM_UART_CR_RX_ENABLE BIT(0)
+#define MSM_UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
+
+#define MSM_UART_IMR 0x0014
+#define MSM_UART_IMR_TXLEV BIT(0)
+#define MSM_UART_IMR_RXSTALE BIT(3)
+#define MSM_UART_IMR_RXLEV BIT(4)
+#define MSM_UART_IMR_DELTA_CTS BIT(5)
+#define MSM_UART_IMR_CURRENT_CTS BIT(6)
+#define MSM_UART_IMR_RXBREAK_START BIT(10)
+
+#define MSM_UART_IPR_RXSTALE_LAST 0x20
+#define MSM_UART_IPR_STALE_LSB 0x1F
+#define MSM_UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define MSM_UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
+
+#define MSM_UART_IPR 0x0018
+#define MSM_UART_TFWR 0x001C
+#define MSM_UART_RFWR 0x0020
+#define MSM_UART_HCR 0x0024
+
+#define MSM_UART_MREG 0x0028
+#define MSM_UART_NREG 0x002C
+#define MSM_UART_DREG 0x0030
+#define MSM_UART_MNDREG 0x0034
+#define MSM_UART_IRDA 0x0038
+#define MSM_UART_MISR_MODE 0x0040
+#define MSM_UART_MISR_RESET 0x0044
+#define MSM_UART_MISR_EXPORT 0x0048
+#define MSM_UART_MISR_VAL 0x004C
+#define MSM_UART_TEST_CTRL 0x0050
+
+#define MSM_UART_SR 0x0008
+#define MSM_UART_SR_HUNT_CHAR BIT(7)
+#define MSM_UART_SR_RX_BREAK BIT(6)
+#define MSM_UART_SR_PAR_FRAME_ERR BIT(5)
+#define MSM_UART_SR_OVERRUN BIT(4)
+#define MSM_UART_SR_TX_EMPTY BIT(3)
+#define MSM_UART_SR_TX_READY BIT(2)
+#define MSM_UART_SR_RX_FULL BIT(1)
+#define MSM_UART_SR_RX_READY BIT(0)
+
+#define MSM_UART_RF 0x000C
#define UARTDM_RF 0x0070
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
-#define UART_ISR_TX_READY BIT(7)
+#define MSM_UART_MISR 0x0010
+#define MSM_UART_ISR 0x0014
+#define MSM_UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
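A plausible motivation for the wholesale MSM_UART_ prefixing (the patch context here does not state one) is namespace hygiene: the old driver-local UART_* names shared a global macro namespace with the generic UART_* register definitions, inviting collisions. Illustration only:

	#include <linux/serial_reg.h>	/* generic UART_* register macros (UART_IER, UART_LSR, ...) */

	/*
	 * Before the rename, driver-local names such as UART_CR or UART_IMR
	 * lived alongside the macros above; if both headers ever met in one
	 * translation unit, a same-named macro could be silently shadowed or
	 * trigger a redefinition warning. The MSM_UART_ prefix removes that risk.
	 */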
@@ -181,7 +181,10 @@ struct msm_port {
struct msm_dma rx_dma;
};
-#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+static inline struct msm_port *to_msm_port(struct uart_port *up)
+{
+ return container_of(up, struct msm_port, uart);
+}
static
void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
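Replacing the UART_TO_MSM() macro with the to_msm_port() inline keeps the same container_of() arithmetic but adds real type checking of the argument. A standalone sketch of the underlying pattern (toy types, userspace re-implementation of container_of for illustration):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inner { int x; };
	struct outer { int extra; struct inner in; };

	int main(void)
	{
		struct outer o = { .extra = 7 };
		struct inner *ip = &o.in;

		/* Recover the enclosing struct from a pointer to its member. */
		struct outer *op = container_of(ip, struct outer, in);
		printf("%d\n", op->extra);	/* prints 7 */
		return 0;
	}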
@@ -200,10 +203,10 @@ unsigned int msm_read(struct uart_port *port, unsigned int off)
*/
static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
{
- msm_write(port, 0x06, UART_MREG);
- msm_write(port, 0xF1, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x1A, UART_MNDREG);
+ msm_write(port, 0x06, MSM_UART_MREG);
+ msm_write(port, 0xF1, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x1A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
@@ -212,16 +215,16 @@ static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
*/
static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
{
- msm_write(port, 0x18, UART_MREG);
- msm_write(port, 0xF6, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x0A, UART_MNDREG);
+ msm_write(port, 0x18, MSM_UART_MREG);
+ msm_write(port, 0xF6, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x0A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
static void msm_serial_set_mnd_regs(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/*
* These registers don't exist so we change the clk input rate
@@ -392,35 +395,35 @@ static inline void msm_wait_for_xmitr(struct uart_port *port)
{
unsigned int timeout = 500000;
- while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
- if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY)) {
+ if (msm_read(port, MSM_UART_ISR) & MSM_UART_ISR_TX_READY)
break;
udelay(1);
if (!timeout--)
break;
}
- msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX_READY, MSM_UART_CR);
}
static void msm_stop_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_start_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->tx_dma;
/* Already started in DMA mode */
if (dma->count)
return;
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -456,8 +459,8 @@ static void msm_complete_tx_dma(void *args)
msm_write(port, val, UARTDM_DMEN);
if (msm_port->is_uartdm > UARTDM_1P3) {
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
}
count = dma->count - state.residue;
@@ -468,8 +471,8 @@ static void msm_complete_tx_dma(void *args)
xmit->tail &= UART_XMIT_SIZE - 1;
/* Restore "Tx FIFO below watermark" interrupt */
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -516,8 +519,8 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
* Using DMA complete for Tx FIFO reload, no need for
* "Tx FIFO below watermark" one, disable it
*/
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
dma->count = count;
@@ -559,10 +562,10 @@ static void msm_complete_rx_dma(void *args)
val &= ~dma->enable_bit;
msm_write(port, val, UARTDM_DMEN);
- if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ if (msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
@@ -584,7 +587,7 @@ static void msm_complete_rx_dma(void *args)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock_irqrestore(&port->lock, flags);
@@ -638,23 +641,23 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
* Using DMA for FIFO off-load, no need for "Rx FIFO over
* watermark" or "stale" interrupts, disable them
*/
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
/*
* Well, when DMA is ADM3 engine(implied by <= UARTDM v1.3),
* we need RXSTALE to flush input DMA fifo to memory
*/
if (msm_port->is_uartdm < UARTDM_1P4)
- msm_port->imr |= UART_IMR_RXSTALE;
+ msm_port->imr |= MSM_UART_IMR_RXSTALE;
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
dma->count = UARTDM_RX_SIZE;
dma_async_issue_pending(dma->chan);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
val = msm_read(uart, UARTDM_DMEN);
val |= dma->enable_bit;
@@ -676,25 +679,25 @@ sw_mode:
* Switch from DMA to SW/FIFO mode. After clearing Rx BAM (UARTDM_DMEN),
* receiver must be reset.
*/
- msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Re-enable RX interrupts */
- msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE;
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
}
static void msm_stop_rx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
@@ -702,10 +705,10 @@ static void msm_stop_rx(struct uart_port *port)
static void msm_enable_ms(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr |= UART_IMR_DELTA_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -714,20 +717,20 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
struct tty_port *tport = &port->state->port;
unsigned int sr;
int count = 0;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
- if (misr & UART_IMR_RXSTALE) {
+ if (misr & MSM_UART_IMR_RXSTALE) {
count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
msm_port->old_snap_state;
msm_port->old_snap_state = 0;
} else {
- count = 4 * (msm_read(port, UART_RFWR));
+ count = 4 * (msm_read(port, MSM_UART_RFWR));
msm_port->old_snap_state += count;
}
@@ -739,8 +742,8 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
unsigned char buf[4];
int sysrq, r_count, i;
- sr = msm_read(port, UART_SR);
- if ((sr & UART_SR_RX_READY) == 0) {
+ sr = msm_read(port, MSM_UART_SR);
+ if ((sr & MSM_UART_SR_RX_READY) == 0) {
msm_port->old_snap_state -= count;
break;
}
@@ -759,7 +762,7 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock(&port->lock);
@@ -773,10 +776,10 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
tty_flip_buffer_push(tport);
- if (misr & (UART_IMR_RXSTALE))
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ if (misr & (MSM_UART_IMR_RXSTALE))
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Try to use DMA */
msm_start_rx_dma(msm_port);
@@ -792,25 +795,25 @@ static void msm_handle_rx(struct uart_port *port)
* Handle overrun. My understanding of the hardware is that overrun
* is not tied to the RX buffer, so we handle the case out of band.
*/
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
/* and now the main RX loop */
- while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ while ((sr = msm_read(port, MSM_UART_SR)) & MSM_UART_SR_RX_READY) {
unsigned int c;
char flag = TTY_NORMAL;
int sysrq;
- c = msm_read(port, UART_RF);
+ c = msm_read(port, MSM_UART_RF);
- if (sr & UART_SR_RX_BREAK) {
+ if (sr & MSM_UART_SR_RX_BREAK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
- } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ } else if (sr & MSM_UART_SR_PAR_FRAME_ERR) {
port->icount.frame++;
} else {
port->icount.rx++;
@@ -819,9 +822,9 @@ static void msm_handle_rx(struct uart_port *port)
/* Mask conditions we're ignoring. */
sr &= port->read_status_mask;
- if (sr & UART_SR_RX_BREAK)
+ if (sr & MSM_UART_SR_RX_BREAK)
flag = TTY_BREAK;
- else if (sr & UART_SR_PAR_FRAME_ERR)
+ else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
spin_unlock(&port->lock);
@@ -837,7 +840,7 @@ static void msm_handle_rx(struct uart_port *port)
static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -845,7 +848,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
if (tx_count && msm_port->is_uartdm)
msm_reset_dm_count(port, tx_count);
@@ -854,7 +857,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
int i;
char buf[4] = { 0 };
- if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
break;
if (msm_port->is_uartdm)
@@ -883,7 +886,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
static void msm_handle_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct circ_buf *xmit = &msm_port->uart.state->xmit;
struct msm_dma *dma = &msm_port->tx_dma;
unsigned int pio_count, dma_count, dma_min;
@@ -895,7 +898,7 @@ static void msm_handle_tx(struct uart_port *port)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
buf[0] = port->x_char;
@@ -939,7 +942,7 @@ static void msm_handle_tx(struct uart_port *port)
static void msm_handle_delta_cts(struct uart_port *port)
{
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
port->icount.cts++;
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
@@ -947,27 +950,27 @@ static void msm_handle_delta_cts(struct uart_port *port)
static irqreturn_t msm_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int misr;
u32 val;
spin_lock_irqsave(&port->lock, flags);
- misr = msm_read(port, UART_MISR);
- msm_write(port, 0, UART_IMR); /* disable interrupt */
+ misr = msm_read(port, MSM_UART_MISR);
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
- if (misr & UART_IMR_RXBREAK_START) {
+ if (misr & MSM_UART_IMR_RXBREAK_START) {
msm_port->break_detected = true;
- msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RXBREAK_START, MSM_UART_CR);
}
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) {
if (dma->count) {
- val = UART_CR_CMD_STALE_EVENT_DISABLE;
- msm_write(port, val, UART_CR);
- val = UART_CR_CMD_RESET_STALE_INT;
- msm_write(port, val, UART_CR);
+ val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, MSM_UART_CR);
+ val = MSM_UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, MSM_UART_CR);
/*
* Flush DMA input fifo to memory, this will also
* trigger DMA RX completion
@@ -979,12 +982,12 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_rx(port);
}
}
- if (misr & UART_IMR_TXLEV)
+ if (misr & MSM_UART_IMR_TXLEV)
msm_handle_tx(port);
- if (misr & UART_IMR_DELTA_CTS)
+ if (misr & MSM_UART_IMR_DELTA_CTS)
msm_handle_delta_cts(port);
- msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
@@ -992,7 +995,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
static unsigned int msm_tx_empty(struct uart_port *port)
{
- return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+ return (msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -1002,19 +1005,19 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int mr;
/* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
- mr = msm_read(port, UART_MR1);
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_BREAK_INT, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
@@ -1025,24 +1028,24 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
- mr = msm_read(port, UART_MR1);
+ mr = msm_read(port, MSM_UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
} else {
- mr |= UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
}
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
if (break_ctl)
- msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_START_BREAK, MSM_UART_CR);
else
- msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STOP_BREAK, MSM_UART_CR);
}
struct msm_baud_map {
@@ -1055,7 +1058,7 @@ static const struct msm_baud_map *
msm_find_best_baud(struct uart_port *port, unsigned int baud,
unsigned long *rate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int divisor, result;
unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
const struct msm_baud_map *entry, *end, *best;
@@ -1124,7 +1127,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned long *saved_flags)
{
unsigned int rxstale, watermark, mask;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
const struct msm_baud_map *entry;
unsigned long flags, rate;
@@ -1139,45 +1142,45 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
*saved_flags = flags;
port->uartclk = rate;
- msm_write(port, entry->code, UART_CSR);
+ msm_write(port, entry->code, MSM_UART_CSR);
/* RX stale watermark */
rxstale = entry->rxstale;
- watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark = MSM_UART_IPR_STALE_LSB & rxstale;
if (msm_port->is_uartdm) {
- mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ mask = MSM_UART_DM_IPR_STALE_TIMEOUT_MSB;
} else {
- watermark |= UART_IPR_RXSTALE_LAST;
- mask = UART_IPR_STALE_TIMEOUT_MSB;
+ watermark |= MSM_UART_IPR_RXSTALE_LAST;
+ mask = MSM_UART_IPR_STALE_TIMEOUT_MSB;
}
watermark |= mask & (rxstale << 2);
- msm_write(port, watermark, UART_IPR);
+ msm_write(port, watermark, MSM_UART_IPR);
/* set RX watermark */
watermark = (port->fifosize * 3) / 4;
- msm_write(port, watermark, UART_RFWR);
+ msm_write(port, watermark, MSM_UART_RFWR);
/* set TX watermark */
- msm_write(port, 10, UART_TFWR);
+ msm_write(port, 10, MSM_UART_TFWR);
- msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_PROTECTION_EN, MSM_UART_CR);
msm_reset(port);
/* Enable RX and TX */
- msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE | MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
/* turn on RX and CTS interrupts */
- msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
- UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;
+ msm_port->imr = MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE |
+ MSM_UART_IMR_CURRENT_CTS | MSM_UART_IMR_RXBREAK_START;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (msm_port->is_uartdm) {
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
}
return baud;
@@ -1185,7 +1188,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
static void msm_init_clock(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
@@ -1194,7 +1197,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int data, rfr_level, mask;
int ret;
@@ -1209,18 +1212,18 @@ static int msm_startup(struct uart_port *port)
rfr_level = port->fifosize;
/* set automatic RFR level */
- data = msm_read(port, UART_MR1);
+ data = msm_read(port, MSM_UART_MR1);
if (msm_port->is_uartdm)
- mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_DM_MR1_AUTO_RFR_LEVEL1;
else
- mask = UART_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_MR1_AUTO_RFR_LEVEL1;
data &= ~mask;
- data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data &= ~MSM_UART_MR1_AUTO_RFR_LEVEL0;
data |= mask & (rfr_level << 2);
- data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
- msm_write(port, data, UART_MR1);
+ data |= MSM_UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, MSM_UART_MR1);
if (msm_port->is_uartdm) {
msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
@@ -1246,10 +1249,10 @@ err_irq:
static void msm_shutdown(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
msm_port->imr = 0;
- msm_write(port, 0, UART_IMR); /* disable interrupts */
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
@@ -1262,7 +1265,7 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
@@ -1279,60 +1282,60 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
tty_termios_encode_baud_rate(termios, baud, baud);
/* calculate parity */
- mr = msm_read(port, UART_MR2);
- mr &= ~UART_MR2_PARITY_MODE;
+ mr = msm_read(port, MSM_UART_MR2);
+ mr &= ~MSM_UART_MR2_PARITY_MODE;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
- mr |= UART_MR2_PARITY_MODE_ODD;
+ mr |= MSM_UART_MR2_PARITY_MODE_ODD;
else if (termios->c_cflag & CMSPAR)
- mr |= UART_MR2_PARITY_MODE_SPACE;
+ mr |= MSM_UART_MR2_PARITY_MODE_SPACE;
else
- mr |= UART_MR2_PARITY_MODE_EVEN;
+ mr |= MSM_UART_MR2_PARITY_MODE_EVEN;
}
/* calculate bits per char */
- mr &= ~UART_MR2_BITS_PER_CHAR;
+ mr &= ~MSM_UART_MR2_BITS_PER_CHAR;
switch (termios->c_cflag & CSIZE) {
case CS5:
- mr |= UART_MR2_BITS_PER_CHAR_5;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_5;
break;
case CS6:
- mr |= UART_MR2_BITS_PER_CHAR_6;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_6;
break;
case CS7:
- mr |= UART_MR2_BITS_PER_CHAR_7;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_7;
break;
case CS8:
default:
- mr |= UART_MR2_BITS_PER_CHAR_8;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_8;
break;
}
/* calculate stop bits */
- mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ mr &= ~(MSM_UART_MR2_STOP_BIT_LEN_ONE | MSM_UART_MR2_STOP_BIT_LEN_TWO);
if (termios->c_cflag & CSTOPB)
- mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_TWO;
else
- mr |= UART_MR2_STOP_BIT_LEN_ONE;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_ONE;
/* set parity, bits per char, and stop bit */
- msm_write(port, mr, UART_MR2);
+ msm_write(port, mr, MSM_UART_MR2);
/* calculate and set hardware flow control */
- mr = msm_read(port, UART_MR1);
- mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~(MSM_UART_MR1_CTS_CTL | MSM_UART_MR1_RX_RDY_CTL);
if (termios->c_cflag & CRTSCTS) {
- mr |= UART_MR1_CTS_CTL;
- mr |= UART_MR1_RX_RDY_CTL;
+ mr |= MSM_UART_MR1_CTS_CTL;
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
}
- msm_write(port, mr, UART_MR1);
+ msm_write(port, mr, MSM_UART_MR1);
/* Configure status bits to ignore based on termio flags. */
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ port->read_status_mask |= MSM_UART_SR_PAR_FRAME_ERR;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_SR_RX_BREAK;
+ port->read_status_mask |= MSM_UART_SR_RX_BREAK;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1416,7 +1419,7 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
static void msm_power(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
switch (state) {
case 0:
@@ -1435,10 +1438,10 @@ static void msm_power(struct uart_port *port, unsigned int state,
#ifdef CONFIG_CONSOLE_POLL
static int msm_poll_get_char_single(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
+ struct msm_port *msm_port = to_msm_port(port);
+ unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : MSM_UART_RF;
- if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY))
return NO_POLL_CHAR;
return msm_read(port, rf_reg) & 0xff;
@@ -1456,7 +1459,7 @@ static int msm_poll_get_char_dm(struct uart_port *port)
c = sp[sizeof(slop) - count];
count--;
/* Or if FIFO is empty */
- } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
+ } else if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY)) {
/*
* If RX packing buffer has less than a word, force stale to
* push contents into RX FIFO
@@ -1464,14 +1467,13 @@ static int msm_poll_get_char_dm(struct uart_port *port)
count = msm_read(port, UARTDM_RXFS);
count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
if (count) {
- msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_FORCE_STALE, MSM_UART_CR);
slop = msm_read(port, UARTDM_RF);
c = sp[0];
count--;
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
- UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
} else {
c = NO_POLL_CHAR;
}
@@ -1489,11 +1491,11 @@ static int msm_poll_get_char(struct uart_port *port)
{
u32 imr;
int c;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
c = msm_poll_get_char_dm(port);
@@ -1501,7 +1503,7 @@ static int msm_poll_get_char(struct uart_port *port)
c = msm_poll_get_char_single(port);
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
return c;
}
@@ -1509,28 +1511,28 @@ static int msm_poll_get_char(struct uart_port *port)
static void msm_poll_put_char(struct uart_port *port, unsigned char c)
{
u32 imr;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Write a character */
- msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : MSM_UART_TF);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
}
#endif
@@ -1588,7 +1590,7 @@ static struct msm_port msm_uart_ports[] = {
},
};
-#define UART_NR ARRAY_SIZE(msm_uart_ports)
+#define MSM_UART_NR ARRAY_SIZE(msm_uart_ports)
static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
@@ -1609,7 +1611,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
/* Account for newlines that will get a carriage return added */
for (i = 0; i < count; i++)
@@ -1655,7 +1657,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
}
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
iowrite32_rep(tf, buf, 1);
@@ -1674,10 +1676,10 @@ static void msm_console_write(struct console *co, const char *s,
struct uart_port *port;
struct msm_port *msm_port;
- BUG_ON(co->index < 0 || co->index >= UART_NR);
+ BUG_ON(co->index < 0 || co->index >= MSM_UART_NR);
port = msm_get_port_from_line(co->index);
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
}
@@ -1690,7 +1692,7 @@ static int msm_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- if (unlikely(co->index >= UART_NR || co->index < 0))
+ if (unlikely(co->index >= MSM_UART_NR || co->index < 0))
return -ENXIO;
port = msm_get_port_from_line(co->index);
@@ -1771,7 +1773,7 @@ static struct uart_driver msm_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "msm_serial",
.dev_name = "ttyMSM",
- .nr = UART_NR,
+ .nr = MSM_UART_NR,
.cons = MSM_CONSOLE,
};
@@ -1801,14 +1803,14 @@ static int msm_serial_probe(struct platform_device *pdev)
if (line < 0)
line = atomic_inc_return(&msm_uart_next_id) - 1;
- if (unlikely(line < 0 || line >= UART_NR))
+ if (unlikely(line < 0 || line >= MSM_UART_NR))
return -ENXIO;
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
id = of_match_device(msm_uartdm_table, &pdev->dev);
if (id)
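The msm_serial hunks above are a namespace cleanup: the driver-local UART_* register macros collide with the generic <linux/serial_reg.h> names, so they gain an MSM_ prefix, and the old UART_TO_MSM() cast macro gives way to a type-safe to_msm_port() helper. A minimal sketch of such a helper, assuming the embedded uart_port member is called uart (the msm_port layout is not shown in this diff):

/* Sketch only; the msm_port member name is an assumption. */
static inline struct msm_port *to_msm_port(struct uart_port *up)
{
	return container_of(up, struct msm_port, uart);
}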
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 643dfbcc43f9..0ba0f4d9459d 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -481,12 +481,6 @@ static int __init mux_probe(struct parisc_device *dev)
port->line = port_cnt;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
- /* The port->timeout needs to match what is present in
- * uart_wait_until_sent in serial_core.c. Otherwise
- * the time spent in msleep_interruptable will be very
- * long, causing the appearance of a console hang.
- */
- port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 93489fe334d0..65eaecd10b7c 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -265,6 +265,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -277,6 +278,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read.
+ * This causes an interrupt loop and a system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
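On the extended (UART2) ports the break-error bit stays latched after the receive buffer is read, so the handler above writes it back explicitly; otherwise the interrupt keeps firing. The write-to-clear idiom the hunk applies, sketched with its own register and bit names:

/* Illustrative only: acknowledge a latched, write-to-clear status bit. */
u32 stat = readl(port->membase + UART_STAT);

stat |= STAT_BRK_ERR;			/* writing 1 releases the latch */
writel(stat, port->membase + UART_STAT);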
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 46f4d4cacb6e..0aa666e247d5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -1102,8 +1103,6 @@ serial_omap_type(struct uart_port *port)
return up->name;
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
@@ -1118,7 +1117,7 @@ static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -1186,7 +1185,7 @@ static void omap_serial_early_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = omap_serial_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
@@ -1325,7 +1324,8 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
@@ -1559,6 +1559,13 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
return 0;
}
+static const struct serial_rs485 serial_omap_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
@@ -1636,6 +1643,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
up->port.rs485_config = serial_omap_config_rs485;
+ up->port.rs485_supported = serial_omap_rs485_supported;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
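This file (like pch_uart, pxa and sunsu below) drops its private BOTH_EMPTY macro for a shared uart_lsr_tx_empty() helper pulled in via <linux/serial.h>. Assuming the helper simply centralizes the test it replaces (the pch_uart hunk below confirms the UART_LSR_BOTH_EMPTY name), it is roughly:

/* Sketch of the shared helper; exact body assumed from the call sites. */
#define UART_LSR_BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

static inline bool uart_lsr_tx_empty(u16 lsr)
{
	return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
}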
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 44d20e5a7dd3..888e17e3f25f 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -201,7 +201,7 @@ static void owl_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
owl_uart_write(port, ch, OWL_UART_TXDAT);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
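Here (and in rda-uart below) the stale SERIAL_XMIT_SIZE spelling becomes UART_XMIT_SIZE; both name the size of the circ_buf transmit buffer, which must be a power of two for the wrap-around mask to work. A worked example, assuming the usual 4096-byte buffer:

/* With UART_XMIT_SIZE == 4096 (assumed), masking wraps the tail:
 *   (4095 + 1) & (4096 - 1) == 0
 */
unsigned int tail = 4095;

tail = (tail + 1) & (4096 - 1);	/* wraps to 0 */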
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 3b26524d48e3..8a9065e4a903 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -3,6 +3,7 @@
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#include <linux/kernel.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -189,8 +190,6 @@ enum {
#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
#define CMITC_UARTCLK 192000000 /* 192.0000 MHz */
#define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */
@@ -1516,7 +1515,7 @@ static void pch_uart_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -1602,7 +1601,7 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
if (port_locked)
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index b399aac530fe..f418f1de66b3 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -503,7 +503,7 @@ static int pic32_uart_startup(struct uart_port *port)
if (!sport->irq_fault_name) {
dev_err(port->dev, "%s: kasprintf err!", __func__);
ret = -ENOMEM;
- goto out_done;
+ goto out_disable_clk;
}
irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
@@ -579,6 +579,8 @@ out_r:
out_f:
free_irq(sport->irq_fault, port);
kfree(sport->irq_fault_name);
+out_disable_clk:
+ clk_disable_unprepare(sport->clk);
out_done:
return ret;
}
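The pic32 fix retargets the kasprintf() failure path so the clock enabled earlier in startup is released; jumping straight to out_done leaked it. The unwind ladder the fix restores, sketched with hypothetical names:

/* Illustrative error unwind (names hypothetical, not from the driver). */
static int example_startup(struct device *dev, struct clk *clk)
{
	char *name;
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_done;

	name = kasprintf(GFP_KERNEL, "%s:fault", dev_name(dev));
	if (!name) {
		ret = -ENOMEM;
		goto out_disable_clk;	/* undo the clock, not just return */
	}

	kfree(name);
	return 0;

out_disable_clk:
	clk_disable_unprepare(clk);
out_done:
	return ret;
}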
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 3133446e806c..f63257b8e872 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -52,7 +52,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/dbdma.h>
#include <asm/macio.h>
#else
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index e80ba8e10407..9309ffd87c8e 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
@@ -575,8 +576,6 @@ static struct uart_driver serial_pxa_reg;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -594,7 +593,7 @@ static void wait_for_xmitr(struct uart_pxa_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f8f950641ad9..f4698a064a4d 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
@@ -940,52 +943,63 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
return 0;
}
-static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
- unsigned int sampling_rate, unsigned int *clk_div)
+static unsigned long find_clk_rate_in_tol(struct clk *clk, unsigned int desired_clk,
+ unsigned int *clk_div, unsigned int percent_tol)
{
- unsigned long ser_clk;
- unsigned long desired_clk;
- unsigned long freq, prev;
+ unsigned long freq;
unsigned long div, maxdiv;
- int64_t mult;
-
- desired_clk = baud * sampling_rate;
- if (!desired_clk) {
- pr_err("%s: Invalid frequency\n", __func__);
- return 0;
- }
+ u64 mult;
+ unsigned long offset, abs_tol, achieved;
+ abs_tol = div_u64((u64)desired_clk * percent_tol, 100);
maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
- prev = 0;
-
- for (div = 1; div <= maxdiv; div++) {
- mult = div * desired_clk;
- if (mult > ULONG_MAX)
+ div = 1;
+ while (div <= maxdiv) {
+ mult = (u64)div * desired_clk;
+ if (mult != (unsigned long)mult)
break;
- freq = clk_round_rate(clk, (unsigned long)mult);
- if (!(freq % desired_clk)) {
- ser_clk = freq;
- break;
- }
+ offset = div * abs_tol;
+ freq = clk_round_rate(clk, mult - offset);
- if (!prev)
- ser_clk = freq;
- else if (prev == freq)
+ /* Can only get lower if we're done */
+ if (freq < mult - offset)
break;
- prev = freq;
- }
+ /*
+ * Re-calculate div in case rounding skipped rates but we
+ * ended up at a good one, then check for a match.
+ */
+ div = DIV_ROUND_CLOSEST(freq, desired_clk);
+ achieved = DIV_ROUND_CLOSEST(freq, div);
+ if (achieved <= desired_clk + abs_tol &&
+ achieved >= desired_clk - abs_tol) {
+ *clk_div = div;
+ return freq;
+ }
- if (!ser_clk) {
- pr_err("%s: Can't find matching DFS entry for baud %d\n",
- __func__, baud);
- return ser_clk;
+ div = DIV_ROUND_UP(freq, desired_clk);
}
- *clk_div = ser_clk / desired_clk;
- if (!(*clk_div))
- *clk_div = 1;
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
+ unsigned int sampling_rate, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * sampling_rate;
+ if (!desired_clk)
+ return 0;
+
+ /*
+ * try to find a clock rate within 2% tolerance, then within 5%
+ */
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 2);
+ if (!ser_clk)
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 5);
return ser_clk;
}
@@ -1020,8 +1034,15 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
clk_rate = get_clk_div_rate(port->se.clk, baud,
sampling_rate, &clk_div);
- if (!clk_rate)
+ if (!clk_rate) {
+ dev_err(port->se.dev,
+ "Couldn't find suitable clock rate for %u\n",
+ baud * sampling_rate);
goto out_restart_rx;
+ }
+
+ dev_dbg(port->se.dev, "desired_rate-%u, clk_rate-%lu, clk_div-%u\n",
+ baud * sampling_rate, clk_rate, clk_div);
uport->uartclk = clk_rate;
dev_pm_opp_set_rate(uport->dev, clk_rate);
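To put numbers on the two-pass search: at 115200 baud with 16x oversampling the desired rate is 1,843,200 Hz, so the 2% pass accepts anything within ±36,864 Hz and the 5% pass only runs if that fails. A standalone sketch of the tolerance test (all names local to the example):

#include <stdbool.h>
#include <stdint.h>

/* Does 'achieved' lie within percent_tol percent of 'desired'? */
static bool rate_within_tol(unsigned long achieved, unsigned long desired,
			    unsigned int percent_tol)
{
	uint64_t abs_tol = (uint64_t)desired * percent_tol / 100;

	return achieved >= desired - abs_tol &&
	       achieved <= desired + abs_tol;
}

/* rate_within_tol(1879048, 1843200, 2) -> true: off by ~1.9% */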
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index f556b4955f59..feb2054aba37 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -353,7 +353,7 @@ static void rda_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 1afe47b62ad5..b7a4b47ce74e 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -48,6 +48,12 @@
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
+#ifdef CONFIG_ARM64
+#define UART_NR 12
+#else
+#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS
+#endif
+
#define S3C24XX_TX_PIO 1
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
@@ -87,7 +93,7 @@ struct s3c24xx_uart_info {
struct s3c24xx_serial_drv_data {
const struct s3c24xx_uart_info info;
const struct s3c2410_uartcfg def_cfg;
- const unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+ const unsigned int fifosize[UART_NR];
};
struct s3c24xx_uart_dma {
@@ -1011,6 +1017,7 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int umcon = rd_regl(port, S3C2410_UMCON);
+ unsigned int ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
@@ -1018,6 +1025,13 @@ static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
umcon &= ~S3C2410_UMCOM_RTS_LOW;
wr_regl(port, S3C2410_UMCON, umcon);
+
+ if (mctrl & TIOCM_LOOP)
+ ucon |= S3C2410_UCON_LOOPBACK;
+ else
+ ucon &= ~S3C2410_UCON_LOOPBACK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
@@ -1801,67 +1815,27 @@ static const struct uart_ops apple_s5l_serial_ops = {
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
- .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
+ .nr = UART_NR,
.cons = S3C24XX_SERIAL_CONSOLE,
.dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
-#define __PORT_LOCK_UNLOCKED(i) \
- __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[i].port.lock)
-static struct s3c24xx_uart_port
-s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
- [0] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(0),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- }
- },
- [1] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(1),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- }
- },
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
- [2] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(2),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 2,
- }
- },
-#endif
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [3] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(3),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 3,
- }
- }
-#endif
-};
-#undef __PORT_LOCK_UNLOCKED
+static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
+
+static void s3c24xx_serial_init_port_default(int index)
+{
+ struct uart_port *port = &s3c24xx_serial_ports[index].port;
+
+ spin_lock_init(&port->lock);
+
+ port->iotype = UPIO_MEM;
+ port->uartclk = 0;
+ port->fifosize = 16;
+ port->ops = &s3c24xx_serial_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->line = index;
+}
/* s3c24xx_serial_resetport
*
@@ -2177,6 +2151,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
ourport = &s3c24xx_serial_ports[index];
+ s3c24xx_serial_init_port_default(index);
+
ourport->drv_data = s3c24xx_get_driver_data(pdev);
if (!ourport->drv_data) {
dev_err(&pdev->dev, "could not find driver data\n");
@@ -2575,7 +2551,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
/* is this a valid port */
- if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
+ if (co->index == -1 || co->index >= UART_NR)
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
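The set_mctrl() addition routes TIOCM_LOOP to the controller's loopback bit, so the standard modem-control ioctls can now flip internal loopback for testing. Hypothetical user-space usage (TIOCM_LOOP is not exposed by every libc, hence the fallback define):

#include <sys/ioctl.h>

#ifndef TIOCM_LOOP
#define TIOCM_LOOP 0x8000	/* from <asm-generic/termios.h> */
#endif

int set_loopback(int fd, int on)
{
	int bit = TIOCM_LOOP;

	return ioctl(fd, on ? TIOCMBIS : TIOCMBIC, &bit);
}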
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 8472bf70477c..259e08cc347c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1127,7 +1127,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
-static int sc16is7xx_config_rs485(struct uart_port *port,
+static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
@@ -1143,7 +1143,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
return -EINVAL;
}
- port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
kthread_queue_work(&s->kworker, &one->reg_work);
@@ -1354,6 +1353,12 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
}
#endif
+static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1, /* Not supported but keep returning -EINVAL */
+};
+
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq)
@@ -1456,6 +1461,7 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
+ s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
s->p[i].port.ops = &sc16is7xx_ops;
s->p[i].old_mctrl = 0;
s->p[i].port.line = sc16is7xx_alloc_line();
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d942ab152f5a..ad4f3567ff90 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -441,7 +441,7 @@ static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
- /* Overrrun error */
+ /* Overrun error */
flag = TTY_OVERRUN;
tup->uport.icount.overrun++;
dev_dbg(tup->uport.dev, "Got overrun errors\n");
@@ -1080,7 +1080,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->rx_in_progress = 1;
/*
- * Enable IE_RXS for the receive status interrupts like line errros.
+ * Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
* EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
@@ -1667,6 +1667,7 @@ static int __init tegra_uart_init(void)
node = of_find_matching_node(NULL, tegra_uart_of_match);
if (node)
match = of_match_node(tegra_uart_of_match, node);
+ of_node_put(node);
if (match)
cdata = match->data;
if (cdata)
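The added of_node_put() pairs the reference taken by of_find_matching_node(); since of_node_put() is NULL-safe it can sit outside the if (node) check. The general pattern:

/* of_find_* iterators return a node with its refcount raised. */
struct device_node *np = of_find_matching_node(NULL, match_table);

if (np)
	use_node(np);		/* hypothetical consumer */
of_node_put(np);		/* safe even when np is NULL */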
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3dc926d6c00a..12c87cd201a7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -97,9 +97,16 @@ static inline struct uart_port *uart_port_check(struct uart_state *state)
return state->uart_port;
}
-/*
- * This routine is used by the interrupt handler to schedule processing in
- * the software interrupt portion of the driver.
+/**
+ * uart_write_wakeup - schedule write processing
+ * @port: port to be processed
+ *
+ * This routine is used by the interrupt handler to schedule processing in the
+ * software interrupt portion of the driver. A driver is expected to call this
+ * function when the number of characters in the transmit buffer has dropped
+ * below a threshold.
+ *
+ * Locking: @port->lock should be held
*/
void uart_write_wakeup(struct uart_port *port)
{
@@ -327,13 +334,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
}
/**
- * uart_update_timeout - update per-port FIFO timeout.
- * @port: uart_port structure describing the port
- * @cflag: termios cflag value
- * @baud: speed of the port
+ * uart_update_timeout - update per-port frame timing information
+ * @port: uart_port structure describing the port
+ * @cflag: termios cflag value
+ * @baud: speed of the port
*
- * Set the port FIFO timeout value. The @cflag value should
- * reflect the actual hardware settings.
+ * Set the @port frame timing information from which the FIFO timeout value is
+ * derived. The @cflag value should reflect the actual hardware settings, as
+ * the number of data bits, parity, stop bits and baud rate are taken into
+ * account here.
+ *
+ * Locking: caller is expected to take @port->lock
*/
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
@@ -343,35 +353,30 @@ uart_update_timeout(struct uart_port *port, unsigned int cflag,
u64 frame_time;
frame_time = (u64)size * NSEC_PER_SEC;
- size *= port->fifosize;
-
- /*
- * Figure the timeout to send the above number of bits.
- * Add .02 seconds of slop
- */
- port->timeout = (HZ * size) / baud + HZ/50;
port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
/**
- * uart_get_baud_rate - return baud rate for a particular port
- * @port: uart_port structure describing the port in question.
- * @termios: desired termios settings.
- * @old: old termios (or NULL)
- * @min: minimum acceptable baud rate
- * @max: maximum acceptable baud rate
+ * uart_get_baud_rate - return baud rate for a particular port
+ * @port: uart_port structure describing the port in question.
+ * @termios: desired termios settings
+ * @old: old termios (or %NULL)
+ * @min: minimum acceptable baud rate
+ * @max: maximum acceptable baud rate
+ *
+ * Decode the termios structure into a numeric baud rate, taking account of the
+ * magic 38400 baud rate (with spd_* flags), and mapping the %B0 rate to 9600
+ * baud.
*
- * Decode the termios structure into a numeric baud rate,
- * taking account of the magic 38400 baud rate (with spd_*
- * flags), and mapping the %B0 rate to 9600 baud.
+ * If the new baud rate is invalid, try the @old termios setting. If it's still
+ * invalid, we try 9600 baud.
*
- * If the new baud rate is invalid, try the old termios setting.
- * If it's still invalid, we try 9600 baud.
+ * The @termios structure is updated to reflect the baud rate we're actually
+ * going to be using. Don't do this for the case where B0 is requested ("hang
+ * up").
*
- * Update the @termios structure to reflect the baud rate
- * we're actually going to be using. Don't do this for the case
- * where B0 is requested ("hang up").
+ * Locking: caller dependent
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
@@ -456,11 +461,17 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
EXPORT_SYMBOL(uart_get_baud_rate);
/**
- * uart_get_divisor - return uart clock divisor
- * @port: uart_port structure describing the port.
- * @baud: desired baud rate
+ * uart_get_divisor - return uart clock divisor
+ * @port: uart_port structure describing the port
+ * @baud: desired baud rate
+ *
+ * Calculate the divisor (baud_base / baud) for the specified @baud,
+ * appropriately rounded.
*
- * Calculate the uart clock divisor for the port.
+ * If 38400 baud and custom divisor is selected, return the custom divisor
+ * instead.
+ *
+ * Locking: caller dependent
*/
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
@@ -1023,10 +1034,10 @@ static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
}
/**
- * uart_get_lsr_info - get line status register info
- * @tty: tty associated with the UART
- * @state: UART being queried
- * @value: returned modem value
+ * uart_get_lsr_info - get line status register info
+ * @tty: tty associated with the UART
+ * @state: UART being queried
+ * @value: returned modem value
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
@@ -1276,6 +1287,126 @@ static int uart_get_icount(struct tty_struct *tty,
return 0;
}
+#define SER_RS485_LEGACY_FLAGS (SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | \
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX | \
+ SER_RS485_TERMINATE_BUS)
+
+static int uart_check_rs485_flags(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 flags = rs485->flags;
+
+ /* Don't return -EINVAL for unsupported legacy flags */
+ flags &= ~SER_RS485_LEGACY_FLAGS;
+
+ /*
+ * For any bit outside of the legacy ones that is not supported by
+ * the driver, return -EINVAL.
+ */
+ if (flags & ~port->rs485_supported.flags)
+ return -EINVAL;
+
+ /* Asking for address w/o addressing mode? */
+ if (!(rs485->flags & SER_RS485_ADDRB) &&
+ (rs485->flags & (SER_RS485_ADDR_RECV|SER_RS485_ADDR_DEST)))
+ return -EINVAL;
+
+ /* Address given but not enabled? */
+ if (!(rs485->flags & SER_RS485_ADDR_RECV) && rs485->addr_recv)
+ return -EINVAL;
+ if (!(rs485->flags & SER_RS485_ADDR_DEST) && rs485->addr_dest)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void uart_sanitize_serial_rs485_delays(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ if (!port->rs485_supported.delay_rts_before_send) {
+ if (rs485->delay_rts_before_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_before_send = 0;
+ } else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_before_send);
+ }
+
+ if (!port->rs485_supported.delay_rts_after_send) {
+ if (rs485->delay_rts_after_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_after_send = 0;
+ } else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_after_send);
+ }
+}
+
+static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 supported_flags = port->rs485_supported.flags;
+
+ if (!(rs485->flags & SER_RS485_ENABLED)) {
+ memset(rs485, 0, sizeof(*rs485));
+ return;
+ }
+
+ /* Pick sane settings if the user hasn't */
+ if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
+ !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+ }
+
+ rs485->flags &= supported_flags;
+
+ uart_sanitize_serial_rs485_delays(port, rs485);
+
+ /* Return clean padding area to userspace */
+ memset(rs485->padding0, 0, sizeof(rs485->padding0));
+ memset(rs485->padding1, 0, sizeof(rs485->padding1));
+}
+
+static void uart_set_rs485_termination(struct uart_port *port,
+ const struct serial_rs485 *rs485)
+{
+ if (!(rs485->flags & SER_RS485_ENABLED))
+ return;
+
+ gpiod_set_value_cansleep(port->rs485_term_gpio,
+ !!(rs485->flags & SER_RS485_TERMINATE_BUS));
+}
+
+int uart_rs485_config(struct uart_port *port)
+{
+ struct serial_rs485 *rs485 = &port->rs485;
+ int ret;
+
+ uart_sanitize_serial_rs485(port, rs485);
+ uart_set_rs485_termination(port, rs485);
+
+ ret = port->rs485_config(port, NULL, rs485);
+ if (ret)
+ memset(rs485, 0, sizeof(*rs485));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uart_rs485_config);
+
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
{
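uart_rs485_config() gives drivers a single entry point that sanitizes the stored settings, drives the termination GPIO and calls ->rs485_config(), while the ioctl path now rejects flags the driver never advertised. A hypothetical user-space configuration that passes the new checks (exactly one of RTS_ON_SEND/RTS_AFTER_SEND set):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int enable_rs485(int fd)
{
	struct serial_rs485 rs485;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.delay_rts_before_send = 1;	/* ms; the core clamps the maximum */

	return ioctl(fd, TIOCSRS485, &rs485);
}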
@@ -1292,7 +1423,7 @@ static int uart_get_rs485_config(struct uart_port *port,
return 0;
}
-static int uart_set_rs485_config(struct uart_port *port,
+static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
struct serial_rs485 __user *rs485_user)
{
struct serial_rs485 rs485;
@@ -1305,34 +1436,14 @@ static int uart_set_rs485_config(struct uart_port *port,
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
- /* pick sane settings if the user hasn't */
- if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
- dev_warn_ratelimited(port->dev,
- "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
- port->name, port->line);
- rs485.flags |= SER_RS485_RTS_ON_SEND;
- rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
- }
-
- if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay before sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_before_send);
- }
-
- if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay after sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_after_send);
- }
- /* Return clean padding area to userspace */
- memset(rs485.padding, 0, sizeof(rs485.padding));
+ ret = uart_check_rs485_flags(port, &rs485);
+ if (ret)
+ return ret;
+ uart_sanitize_serial_rs485(port, &rs485);
+ uart_set_rs485_termination(port, &rs485);
spin_lock_irqsave(&port->lock, flags);
- ret = port->rs485_config(port, &rs485);
+ ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret)
port->rs485 = rs485;
spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1552,10 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
if (ret != -ENOIOCTLCMD)
goto out;
+ /* rs485_config requires more locking than others */
+ if (cmd == TIOCSRS485)
+ down_write(&tty->termios_rwsem);
+
mutex_lock(&port->mutex);
uport = uart_port_check(state);
@@ -1464,7 +1579,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
break;
case TIOCSRS485:
- ret = uart_set_rs485_config(uport, uarg);
+ ret = uart_set_rs485_config(tty, uport, uarg);
break;
case TIOCSISO7816:
@@ -1481,6 +1596,8 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
}
out_up:
mutex_unlock(&port->mutex);
+ if (cmd == TIOCSRS485)
+ up_write(&tty->termios_rwsem);
out:
return ret;
}
@@ -1628,7 +1745,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
- unsigned long char_time, expire;
+ unsigned long char_time, expire, fifo_timeout;
port = uart_port_ref(state);
if (!port)
@@ -1658,12 +1775,13 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
- * takes longer than port->timeout, this is probably due to a
+ * takes longer than FIFO timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
- * 2*port->timeout.
+ * 2 * FIFO timeout.
*/
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
+ fifo_timeout = uart_fifo_timeout(port);
+ if (timeout == 0 || timeout > 2 * fifo_timeout)
+ timeout = 2 * fifo_timeout;
}
expire = jiffies + timeout;
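With port->timeout gone, the bound is derived on demand from port->frame_time. Assuming uart_fifo_timeout() mirrors the arithmetic removed from uart_update_timeout() above (one FIFO's worth of frames plus the old 20 ms of slop), it is roughly:

/* Sketch; the exact helper body is assumed from the removed code. */
static inline unsigned long uart_fifo_timeout(struct uart_port *port)
{
	u64 fifo_timeout = (u64)port->frame_time * port->fifosize;

	fifo_timeout += 20 * NSEC_PER_MSEC;	/* .02 s of slop */

	return max(nsecs_to_jiffies(fifo_timeout), 1UL);
}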
@@ -1949,11 +2067,11 @@ static void uart_port_spin_lock_init(struct uart_port *port)
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
- * uart_console_write - write a console message to a serial port
- * @port: the port to write the message
- * @s: array of characters
- * @count: number of characters in string to write
- * @putchar: function to write character to port
+ * uart_console_write - write a console message to a serial port
+ * @port: the port to write the message
+ * @s: array of characters
+ * @count: number of characters in string to write
+ * @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
@@ -1969,10 +2087,15 @@ void uart_console_write(struct uart_port *port, const char *s,
}
EXPORT_SYMBOL_GPL(uart_console_write);
-/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
+/**
+ * uart_get_console - get uart port for console
+ * @ports: ports to search in
+ * @nr: number of @ports
+ * @co: console to search for
+ * Returns: uart_port for the console @co
+ *
+ * Check whether an invalid uart number has been specified (as @co->index), and
+ * if so, search for the first available port that does have console support.
*/
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
@@ -1992,24 +2115,23 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
- * uart_parse_earlycon - Parse earlycon options
- * @p: ptr to 2nd field (ie., just beyond '<name>,')
- * @iotype: ptr for decoded iotype (out)
- * @addr: ptr for decoded mapbase/iobase (out)
- * @options: ptr for <options> field; NULL if not present (out)
- *
- * Decodes earlycon kernel command line parameters of the form
- * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
- * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * uart_parse_earlycon - Parse earlycon options
+ * @p: ptr to 2nd field (i.e., just beyond '<name>,')
+ * @iotype: ptr for decoded iotype (out)
+ * @addr: ptr for decoded mapbase/iobase (out)
+ * @options: ptr for <options> field; %NULL if not present (out)
*
- * The optional form
+ * Decodes earlycon kernel command line parameters of the form:
+ * * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
- * earlycon=<name>,0x<addr>,<options>
- * console=<name>,0x<addr>,<options>
+ * The optional form:
+ * * earlycon=<name>,0x<addr>,<options>
+ * * console=<name>,0x<addr>,<options>
*
- * is also accepted; the returned @iotype will be UPIO_MEM.
+ * is also accepted; the returned @iotype will be %UPIO_MEM.
*
- * Returns 0 on success or -EINVAL on failure
+ * Returns: 0 on success or -%EINVAL on failure
*/
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options)
@@ -2054,16 +2176,16 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
EXPORT_SYMBOL_GPL(uart_parse_earlycon);
/**
- * uart_parse_options - Parse serial port baud/parity/bits/flow control.
- * @options: pointer to option string
- * @baud: pointer to an 'int' variable for the baud rate.
- * @parity: pointer to an 'int' variable for the parity.
- * @bits: pointer to an 'int' variable for the number of data bits.
- * @flow: pointer to an 'int' variable for the flow control character.
+ * uart_parse_options - Parse serial port baud/parity/bits/flow control.
+ * @options: pointer to option string
+ * @baud: pointer to an 'int' variable for the baud rate.
+ * @parity: pointer to an 'int' variable for the parity.
+ * @bits: pointer to an 'int' variable for the number of data bits.
+ * @flow: pointer to an 'int' variable for the flow control character.
*
- * uart_parse_options decodes a string containing the serial console
- * options. The format of the string is <baud><parity><bits><flow>,
- * eg: 115200n8r
+ * uart_parse_options() decodes a string containing the serial console
+ * options. The format of the string is <baud><parity><bits><flow>,
+ * e.g.: 115200n8r
*/
void
uart_parse_options(const char *options, int *baud, int *parity,
@@ -2084,13 +2206,13 @@ uart_parse_options(const char *options, int *baud, int *parity,
EXPORT_SYMBOL_GPL(uart_parse_options);
/**
- * uart_set_options - setup the serial console parameters
- * @port: pointer to the serial ports uart_port structure
- * @co: console pointer
- * @baud: baud rate
- * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
- * @bits: number of data bits
- * @flow: flow control character - 'r' (rts)
+ * uart_set_options - setup the serial console parameters
+ * @port: pointer to the serial ports uart_port structure
+ * @co: console pointer
+ * @baud: baud rate
+ * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
+ * @bits: number of data bits
+ * @flow: flow control character - 'r' (rts)
*/
int
uart_set_options(struct uart_port *port, struct console *co,
@@ -2577,17 +2699,19 @@ static const struct tty_port_operations uart_port_ops = {
};
/**
- * uart_register_driver - register a driver with the uart core layer
- * @drv: low level driver structure
+ * uart_register_driver - register a driver with the uart core layer
+ * @drv: low level driver structure
+ *
+ * Register a uart driver with the core driver. We in turn register with the
+ * tty layer, and initialise the core driver per-port state.
*
- * Register a uart driver with the core driver. We in turn register
- * with the tty layer, and initialise the core driver per-port state.
+ * We have a proc file in /proc/tty/driver which is named after the normal
+ * driver.
*
- * We have a proc file in /proc/tty/driver which is named after the
- * normal driver.
+ * @drv->port should be %NULL, and the per-port structures should be registered
+ * using uart_add_one_port() after this call has succeeded.
*
- * drv->port should be NULL, and the per-port structures should be
- * registered using uart_add_one_port after this call has succeeded.
+ * Locking: none, Interrupts: enabled
*/
int uart_register_driver(struct uart_driver *drv)
{
@@ -2651,13 +2775,14 @@ out:
EXPORT_SYMBOL(uart_register_driver);
/**
- * uart_unregister_driver - remove a driver from the uart core layer
- * @drv: low level driver structure
+ * uart_unregister_driver - remove a driver from the uart core layer
+ * @drv: low level driver structure
*
- * Remove all references to a driver from the core driver. The low
- * level driver must have removed all its ports via the
- * uart_remove_one_port() if it registered them with uart_add_one_port().
- * (ie, drv->port == NULL)
+ * Remove all references to a driver from the core driver. The low level
+ * driver must have removed all its ports via the uart_remove_one_port() if it
+ * registered them with uart_add_one_port(). (I.e. @drv->port is %NULL.)
+ *
+ * Locking: none, Interrupts: enabled
*/
void uart_unregister_driver(struct uart_driver *drv)
{
@@ -2906,16 +3031,15 @@ static const struct attribute_group tty_dev_attr_group = {
};
/**
- * uart_add_one_port - attach a driver-defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure to use for this port.
+ * uart_add_one_port - attach a driver-defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure to use for this port.
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This allows the driver to register its own uart_port structure
- * with the core driver. The main purpose is to allow the low
- * level uart drivers to expand uart_port, rather than having yet
- * more levels of structures.
+ * This allows the driver @drv to register its own uart_port structure with the
+ * core driver. The main purpose is to allow the low level uart drivers to
+ * expand uart_port, rather than having yet more levels of structures.
*/
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3010,15 +3134,14 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
EXPORT_SYMBOL(uart_add_one_port);
/**
- * uart_remove_one_port - detach a driver defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure for this port
+ * uart_remove_one_port - detach a driver defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure for this port
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This unhooks (and hangs up) the specified port structure from the
- * core driver. No further calls will be made to the low-level code
- * for this port.
+ * This unhooks (and hangs up) the specified port structure from the core
+ * driver. No further calls will be made to the low-level code for this port.
*/
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3090,8 +3213,13 @@ out:
}
EXPORT_SYMBOL(uart_remove_one_port);
-/*
- * Are the two ports equivalent?
+/**
+ * uart_match_port - are the two ports equivalent?
+ * @port1: first port
+ * @port2: second port
+ *
+ * This utility function can be used to determine whether two uart_port
+ * structures describe the same port.
*/
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2)
@@ -3119,11 +3247,11 @@ bool uart_match_port(const struct uart_port *port1,
EXPORT_SYMBOL(uart_match_port);
/**
- * uart_handle_dcd_change - handle a change of carrier detect state
- * @uport: uart_port structure for the open port
- * @status: new carrier detect status, nonzero if active
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
@@ -3154,11 +3282,11 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
- * uart_handle_cts_change - handle a change of clear-to-send state
- * @uport: uart_port structure for the open port
- * @status: new clear to send status, nonzero if active
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
{
@@ -3229,15 +3357,15 @@ static void uart_sysrq_on(struct work_struct *w)
static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
/**
- * uart_try_toggle_sysrq - Enables SysRq from serial line
- * @port: uart_port structure where char(s) after BREAK met
- * @ch: new character in the sequence after received BREAK
+ * uart_try_toggle_sysrq - Enables SysRq from serial line
+ * @port: uart_port structure where char(s) after BREAK met
+ * @ch: new character in the sequence after received BREAK
*
- * Enables magic SysRq when the required sequence is met on port
- * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
+ * Enables magic SysRq when the required sequence is met on port
+ * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
*
- * Returns false if @ch is out of enabling sequence and should be
- * handled some other way, true if @ch was consumed.
+ * Returns: %false if @ch is out of enabling sequence and should be
+ * handled some other way, %true if @ch was consumed.
*/
bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
{
@@ -3289,6 +3417,8 @@ int uart_get_rs485_mode(struct uart_port *port)
rs485conf->delay_rts_after_send = 0;
}
+ uart_sanitize_serial_rs485_delays(port, rs485conf);
+
/*
* Clear full-duplex and enabled flags, set RTS polarity to active high
* to get to a defined state with the following properties:
@@ -3321,10 +3451,20 @@ int uart_get_rs485_mode(struct uart_port *port)
port->rs485_term_gpio = NULL;
return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
}
+ if (port->rs485_term_gpio)
+ port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
+/* Compile-time assertions for serial_rs485 layout */
+static_assert(offsetof(struct serial_rs485, padding) ==
+ (offsetof(struct serial_rs485, delay_rts_after_send) + sizeof(__u32)));
+static_assert(offsetof(struct serial_rs485, padding1) ==
+ offsetof(struct serial_rs485, padding[1]));
+static_assert((offsetof(struct serial_rs485, padding[4]) + sizeof(__u32)) ==
+ sizeof(struct serial_rs485));
+
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 1663b3afc3a0..7d5aaa8d422b 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -42,6 +42,13 @@ static bool mctrl_gpio_flags_is_dir_out(unsigned int idx)
return mctrl_gpios_desc[idx].flags & GPIOD_FLAGS_BIT_DIR_OUT;
}
+/**
+ * mctrl_gpio_set - set gpios according to mctrl state
+ * @gpios: gpios to set
+ * @mctrl: state to set
+ *
+ * Set the gpios according to the mctrl state.
+ */
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
@@ -63,6 +70,12 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_set);
+/**
+ * mctrl_gpio_to_gpiod - obtain gpio_desc of modem line index
+ * @gpios: gpios to look into
+ * @gidx: index of the modem line
+ * Returns: the gpio_desc structure associated to the modem line index
+ */
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
@@ -73,6 +86,14 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
+/**
+ * mctrl_gpio_get - update mctrl with the gpios values.
+ * @gpios: gpios to get the info from
+ * @mctrl: mctrl to set
+ * Returns: modified mctrl (the same value as in @mctrl)
+ *
+ * Update mctrl with the gpios values.
+ */
unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
@@ -189,6 +210,17 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
return IRQ_HANDLED;
}
+/**
+ * mctrl_gpio_init - initialize uart gpios
+ * @port: port to initialize gpios for
+ * @idx: index of the gpio in the @port's device
+ *
+ * This will get the {cts,rts,...}-gpios from device tree if they are present
+ * and request them, set direction etc, and return an allocated structure.
+ * `devm_*` functions are used, so there's no need to call mctrl_gpio_free().
+ * As this sets up the irq handling, make sure to not handle changes to the
+ * gpio input lines in your driver, too.
+ */
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
{
struct mctrl_gpios *gpios;
@@ -235,6 +267,14 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_init);
+/**
+ * mctrl_gpio_free - explicitly free uart gpios
+ * @dev: uart port's device
+ * @gpios: gpios structure to be freed
+ *
+ * This will free the requested gpios in mctrl_gpio_init(). As `devm_*`
+ * functions are used, there's generally no need to call this function.
+ */
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -253,6 +293,10 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_free);
+/**
+ * mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines
+ * @gpios: gpios to enable
+ */
void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -278,6 +322,10 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
+/**
+ * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
+ * @gpios: gpios to disable
+ */
void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
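For a driver adopting the newly documented API, probe-time usage typically looks like this (per the kernel-doc above, the result is devm-managed and may be an ERR_PTR):

/* Hypothetical probe-time hookup. */
struct mctrl_gpios *gpios;

gpios = mctrl_gpio_init(port, 0);	/* no mctrl_gpio_free() needed */
if (IS_ERR(gpios))
	return PTR_ERR(gpios);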
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index c0869b080cc3..5c3a07546a58 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -4,16 +4,6 @@
* Copyright (C) 2018 Paul Walmsley <paul@pwsan.com>
* Copyright (C) 2018-2019 SiFive
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based partially on:
* - drivers/tty/serial/pxa.c
* - drivers/tty/serial/amba-pl011.c
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 1b0da603ab54..cce42f4c9bc2 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -17,7 +17,6 @@
#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 0973b03eeeaa..2c85dbf165c4 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -35,6 +35,75 @@
#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
+
+/* Register offsets */
+static struct stm32_usart_info stm32f4_info = {
+ .ofs = {
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ },
+ .cfg = {
+ .uart_enable_bit = 13,
+ .has_7bits_data = false,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32f7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32h7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .has_wakeup = true,
+ .has_fifo = true,
+ .fifosize = 16,
+ }
+};
+
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
@@ -99,7 +168,7 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr1 |= rs485_deat_dedt;
}
-static int stm32_usart_config_rs485(struct uart_port *port,
+static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1377,6 +1446,13 @@ static void stm32_usart_deinit_port(struct stm32_port *stm32port)
clk_disable_unprepare(stm32port->clk);
}
+static const struct serial_rs485 stm32_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int stm32_usart_init_port(struct stm32_port *stm32port,
struct platform_device *pdev)
{
@@ -1396,6 +1472,7 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
+ port->rs485_supported = stm32_rs485_supported;
ret = stm32_usart_init_rs485(port, pdev);
if (ret)
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index ee69c203b926..0ec41a732c88 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -38,74 +38,6 @@ struct stm32_usart_info {
#define UNDEF_REG 0xff
-/* Register offsets */
-struct stm32_usart_info stm32f4_info = {
- .ofs = {
- .isr = 0x00,
- .rdr = 0x04,
- .tdr = 0x04,
- .brr = 0x08,
- .cr1 = 0x0c,
- .cr2 = 0x10,
- .cr3 = 0x14,
- .gtpr = 0x18,
- .rtor = UNDEF_REG,
- .rqr = UNDEF_REG,
- .icr = UNDEF_REG,
- },
- .cfg = {
- .uart_enable_bit = 13,
- .has_7bits_data = false,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32f7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32h7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .has_wakeup = true,
- .has_fifo = true,
- .fifosize = 16,
- }
-};
-
/* USART_SR (F4) / USART_ISR (F7) */
#define USART_SR_PE BIT(0)
#define USART_SR_FE BIT(1)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index fff50b5b82eb..84d545e5a8c7 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1249,8 +1249,6 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up)
#ifdef CONFIG_SERIAL_SUNSU_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -1268,7 +1266,7 @@ static void wait_for_xmitr(struct uart_sunsu_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
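
uart_lsr_tx_empty() centralizes the "transmitter fully idle" test that
8250-style drivers used to open-code through a private BOTH_EMPTY macro. Its
behaviour amounts to the sketch below (the real helper lives in
<linux/serial_core.h>):

#include <linux/types.h>
#include <linux/serial_reg.h>

#define BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

/* True once both the shift register and the holding register are empty. */
static inline bool uart_lsr_tx_empty_sketch(u16 lsr)
{
	return (lsr & BOTH_EMPTY) == BOTH_EMPTY;
}
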
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 6000853973c1..3cc9ef08455c 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1137,6 +1137,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
/* No compatible property, so try the name. */
soc_string = np->name;
+ of_node_put(np);
+
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
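
The added of_node_put() balances the reference implicitly taken by the OF
lookup helper that produced np earlier in soc_info(); every of_find_*() result
pins the node until it is put. The general pattern, sketched with an arbitrary
property (copy data out first, drop the reference after the last use):

#include <linux/of.h>

static u32 soc_lookup_example(void)
{
	struct device_node *np;
	u32 id = 0;

	np = of_find_node_by_type(NULL, "soc");	/* takes a reference */
	if (!np)
		return 0;
	of_property_read_u32(np, "cell-index", &id);
	of_node_put(np);			/* last use is above */
	return id;
}
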
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
deleted file mode 100644
index e0bf003ca3a1..000000000000
--- a/drivers/tty/serial/vr41xx_siu.c
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series Serial Interface Unit.
- *
- * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * Based on drivers/serial/8250.c, by Russell King.
- */
-
-#include <linux/console.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_reg.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-
-#include <linux/io.h>
-#include <asm/vr41xx/siu.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#define SIU_BAUD_BASE 1152000
-#define SIU_MAJOR 204
-#define SIU_MINOR_BASE 82
-
-#define RX_MAX_COUNT 256
-#define TX_MAX_COUNT 15
-
-#define SIUIRSEL 0x08
- #define TMICMODE 0x20
- #define TMICTX 0x10
- #define IRMSEL 0x0c
- #define IRMSEL_HP 0x08
- #define IRMSEL_TEMIC 0x04
- #define IRMSEL_SHARP 0x00
- #define IRUSESEL 0x02
- #define SIRSEL 0x01
-
-static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
- [0 ... SIU_PORTS_MAX-1] = {
- .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
- .irq = 0,
- },
-};
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-static uint8_t lsr_break_flag[SIU_PORTS_MAX];
-#endif
-
-#define siu_read(port, offset) readb((port)->membase + (offset))
-#define siu_write(port, offset, value) writeb((value), (port)->membase + (offset))
-
-void vr41xx_select_siu_interface(siu_interface_t interface)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (interface == SIU_INTERFACE_IRDA)
- irsel |= SIRSEL;
- else
- irsel &= ~SIRSEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
-
-void vr41xx_use_irda(irda_use_t use)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (use == FIR_USE_IRDA)
- irsel |= IRUSESEL;
- else
- irsel &= ~IRUSESEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_use_irda);
-
-void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- irsel &= ~(IRMSEL | TMICTX | TMICMODE);
- switch (module) {
- case SHARP_IRDA:
- irsel |= IRMSEL_SHARP;
- break;
- case TEMIC_IRDA:
- irsel |= IRMSEL_TEMIC | TMICMODE;
- if (speed == IRDA_TX_4MBPS)
- irsel |= TMICTX;
- break;
- case HP_IRDA:
- irsel |= IRMSEL_HP;
- break;
- default:
- break;
- }
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
-
-static inline void siu_clear_fifo(struct uart_port *port)
-{
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO);
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- siu_write(port, UART_FCR, 0);
-}
-
-static inline unsigned long siu_port_size(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return 11UL;
- case PORT_VR41XX_DSIU:
- return 8UL;
- }
-
- return 0;
-}
-
-static inline unsigned int siu_check_type(struct uart_port *port)
-{
- if (port->line == 0)
- return PORT_VR41XX_SIU;
- if (port->line == 1 && port->irq)
- return PORT_VR41XX_DSIU;
-
- return PORT_UNKNOWN;
-}
-
-static inline const char *siu_type_name(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return "SIU";
- case PORT_VR41XX_DSIU:
- return "DSIU";
- }
-
- return NULL;
-}
-
-static unsigned int siu_tx_empty(struct uart_port *port)
-{
- uint8_t lsr;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_TEMT)
- return TIOCSER_TEMT;
-
- return 0;
-}
-
-static void siu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- uint8_t mcr = 0;
-
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- siu_write(port, UART_MCR, mcr);
-}
-
-static unsigned int siu_get_mctrl(struct uart_port *port)
-{
- uint8_t msr;
- unsigned int mctrl = 0;
-
- msr = siu_read(port, UART_MSR);
- if (msr & UART_MSR_DCD)
- mctrl |= TIOCM_CAR;
- if (msr & UART_MSR_RI)
- mctrl |= TIOCM_RNG;
- if (msr & UART_MSR_DSR)
- mctrl |= TIOCM_DSR;
- if (msr & UART_MSR_CTS)
- mctrl |= TIOCM_CTS;
-
- return mctrl;
-}
-
-static void siu_stop_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_start_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_stop_rx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_RLSI;
- siu_write(port, UART_IER, ier);
-
- port->read_status_mask &= ~UART_LSR_DR;
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_enable_ms(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_break_ctl(struct uart_port *port, int ctl)
-{
- unsigned long flags;
- uint8_t lcr;
-
- spin_lock_irqsave(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- if (ctl == -1)
- lcr |= UART_LCR_SBC;
- else
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static inline void receive_chars(struct uart_port *port, uint8_t *status)
-{
- uint8_t lsr, ch;
- char flag;
- int max_count = RX_MAX_COUNT;
-
- lsr = *status;
-
- do {
- ch = siu_read(port, UART_RX);
- port->icount.rx++;
- flag = TTY_NORMAL;
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
- lsr |= lsr_break_flag[port->line];
- lsr_break_flag[port->line] = 0;
-#endif
- if (unlikely(lsr & (UART_LSR_BI | UART_LSR_FE |
- UART_LSR_PE | UART_LSR_OE))) {
- if (lsr & UART_LSR_BI) {
- lsr &= ~(UART_LSR_FE | UART_LSR_PE);
- port->icount.brk++;
-
- if (uart_handle_break(port))
- goto ignore_char;
- }
-
- if (lsr & UART_LSR_FE)
- port->icount.frame++;
- if (lsr & UART_LSR_PE)
- port->icount.parity++;
- if (lsr & UART_LSR_OE)
- port->icount.overrun++;
-
- lsr &= port->read_status_mask;
- if (lsr & UART_LSR_BI)
- flag = TTY_BREAK;
- if (lsr & UART_LSR_FE)
- flag = TTY_FRAME;
- if (lsr & UART_LSR_PE)
- flag = TTY_PARITY;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
- ignore_char:
- lsr = siu_read(port, UART_LSR);
- } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
-
- tty_flip_buffer_push(&port->state->port);
-
- *status = lsr;
-}
-
-static inline void check_modem_status(struct uart_port *port)
-{
- uint8_t msr;
-
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_ANY_DELTA) == 0)
- return;
- if (msr & UART_MSR_DDCD)
- uart_handle_dcd_change(port, msr & UART_MSR_DCD);
- if (msr & UART_MSR_TERI)
- port->icount.rng++;
- if (msr & UART_MSR_DDSR)
- port->icount.dsr++;
- if (msr & UART_MSR_DCTS)
- uart_handle_cts_change(port, msr & UART_MSR_CTS);
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-}
-
-static inline void transmit_chars(struct uart_port *port)
-{
- struct circ_buf *xmit;
- int max_count = TX_MAX_COUNT;
-
- xmit = &port->state->xmit;
-
- if (port->x_char) {
- siu_write(port, UART_TX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- siu_stop_tx(port);
- return;
- }
-
- do {
- siu_write(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (max_count-- > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- siu_stop_tx(port);
-}
-
-static irqreturn_t siu_interrupt(int irq, void *dev_id)
-{
- struct uart_port *port;
- uint8_t iir, lsr;
-
- port = (struct uart_port *)dev_id;
-
- iir = siu_read(port, UART_IIR);
- if (iir & UART_IIR_NO_INT)
- return IRQ_NONE;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_DR)
- receive_chars(port, &lsr);
-
- check_modem_status(port);
-
- if (lsr & UART_LSR_THRE)
- transmit_chars(port);
-
- return IRQ_HANDLED;
-}
-
-static int siu_startup(struct uart_port *port)
-{
- int retval;
-
- if (port->membase == NULL)
- return -ENODEV;
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- if (siu_read(port, UART_LSR) == 0xff)
- return -ENODEV;
-
- retval = request_irq(port->irq, siu_interrupt, 0, siu_type_name(port), port);
- if (retval)
- return retval;
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_enable_dsiuint(DSIUINT_ALL);
-
- siu_write(port, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irq(&port->lock);
- siu_set_mctrl(port, port->mctrl);
- spin_unlock_irq(&port->lock);
-
- siu_write(port, UART_IER, UART_IER_RLSI | UART_IER_RDI);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- return 0;
-}
-
-static void siu_shutdown(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t lcr;
-
- siu_write(port, UART_IER, 0);
-
- spin_lock_irqsave(&port->lock, flags);
-
- port->mctrl &= ~TIOCM_OUT2;
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_RX);
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_disable_dsiuint(DSIUINT_ALL);
-
- free_irq(port->irq, port);
-}
-
-static void siu_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
-{
- tcflag_t c_cflag, c_iflag;
- uint8_t lcr, fcr, ier;
- unsigned int baud, quot;
- unsigned long flags;
-
- c_cflag = new->c_cflag;
- lcr = UART_LCR_WLEN(tty_get_char_size(c_cflag));
-
- if (c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
- if (c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
- if ((c_cflag & PARODD) != PARODD)
- lcr |= UART_LCR_EPAR;
- if (c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
-
- spin_lock_irqsave(&port->lock, flags);
-
- uart_update_timeout(port, c_cflag, baud);
-
- c_iflag = new->c_iflag;
-
- port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
- if (c_iflag & INPCK)
- port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_LSR_BI;
-
- port->ignore_status_mask = 0;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & IGNBRK) {
- port->ignore_status_mask |= UART_LSR_BI;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_OE;
- }
-
- if ((c_cflag & CREAD) == 0)
- port->ignore_status_mask |= UART_LSR_DR;
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(port, c_cflag))
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- siu_write(port, UART_LCR, lcr | UART_LCR_DLAB);
-
- siu_write(port, UART_DLL, (uint8_t)quot);
- siu_write(port, UART_DLM, (uint8_t)(quot >> 8));
-
- siu_write(port, UART_LCR, lcr);
-
- siu_write(port, UART_FCR, fcr);
-
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
-{
- switch (state) {
- case 0:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_supply_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_supply_clock(DSIU_CLOCK);
- break;
- }
- break;
- case 3:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_mask_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_mask_clock(DSIU_CLOCK);
- break;
- }
- break;
- }
-}
-
-static const char *siu_type(struct uart_port *port)
-{
- return siu_type_name(port);
-}
-
-static void siu_release_port(struct uart_port *port)
-{
- unsigned long size;
-
- if (port->flags & UPF_IOREMAP) {
- iounmap(port->membase);
- port->membase = NULL;
- }
-
- size = siu_port_size(port);
- release_mem_region(port->mapbase, size);
-}
-
-static int siu_request_port(struct uart_port *port)
-{
- unsigned long size;
- struct resource *res;
-
- size = siu_port_size(port);
- res = request_mem_region(port->mapbase, size, siu_type_name(port));
- if (res == NULL)
- return -EBUSY;
-
- if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap(port->mapbase, size);
- if (port->membase == NULL) {
- release_resource(res);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void siu_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = siu_check_type(port);
- (void)siu_request_port(port);
- }
-}
-
-static int siu_verify_port(struct uart_port *port, struct serial_struct *serial)
-{
- if (port->type != PORT_VR41XX_SIU && port->type != PORT_VR41XX_DSIU)
- return -EINVAL;
- if (port->irq != serial->irq)
- return -EINVAL;
- if (port->iotype != serial->io_type)
- return -EINVAL;
- if (port->mapbase != (unsigned long)serial->iomem_base)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct uart_ops siu_uart_ops = {
- .tx_empty = siu_tx_empty,
- .set_mctrl = siu_set_mctrl,
- .get_mctrl = siu_get_mctrl,
- .stop_tx = siu_stop_tx,
- .start_tx = siu_start_tx,
- .stop_rx = siu_stop_rx,
- .enable_ms = siu_enable_ms,
- .break_ctl = siu_break_ctl,
- .startup = siu_startup,
- .shutdown = siu_shutdown,
- .set_termios = siu_set_termios,
- .pm = siu_pm,
- .type = siu_type,
- .release_port = siu_release_port,
- .request_port = siu_request_port,
- .config_port = siu_config_port,
- .verify_port = siu_verify_port,
-};
-
-static int siu_init_ports(struct platform_device *pdev)
-{
- struct uart_port *port;
- struct resource *res;
- int *type = dev_get_platdata(&pdev->dev);
- int i;
-
- if (!type)
- return 0;
-
- port = siu_uart_ports;
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port->type = type[i];
- if (port->type == PORT_UNKNOWN)
- continue;
- port->irq = platform_get_irq(pdev, i);
- port->uartclk = SIU_BAUD_BASE * 16;
- port->fifosize = 16;
- port->regshift = 0;
- port->iotype = UPIO_MEM;
- port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
- port->line = i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- port->mapbase = res->start;
- port++;
- }
-
- return i;
-}
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void wait_for_xmitr(struct uart_port *port)
-{
- int timeout = 10000;
- uint8_t lsr, msr;
-
- do {
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_BI)
- lsr_break_flag[port->line] = UART_LSR_BI;
-
- if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
- break;
- } while (timeout-- > 0);
-
- if (port->flags & UPF_CONS_FLOW) {
- timeout = 1000000;
-
- do {
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_CTS) != 0)
- break;
- } while (timeout-- > 0);
- }
-}
-
-static void siu_console_putchar(struct uart_port *port, unsigned char ch)
-{
- wait_for_xmitr(port);
- siu_write(port, UART_TX, ch);
-}
-
-static void siu_console_write(struct console *con, const char *s, unsigned count)
-{
- struct uart_port *port;
- uint8_t ier;
-
- port = &siu_uart_ports[con->index];
-
- ier = siu_read(port, UART_IER);
- siu_write(port, UART_IER, 0);
-
- uart_console_write(port, s, count, siu_console_putchar);
-
- wait_for_xmitr(port);
- siu_write(port, UART_IER, ier);
-}
-
-static int __init siu_console_setup(struct console *con, char *options)
-{
- struct uart_port *port;
- int baud = 9600;
- int parity = 'n';
- int bits = 8;
- int flow = 'n';
-
- if (con->index >= SIU_PORTS_MAX)
- con->index = 0;
-
- port = &siu_uart_ports[con->index];
- if (port->membase == NULL) {
- if (port->mapbase == 0)
- return -ENODEV;
- port->membase = ioremap(port->mapbase, siu_port_size(port));
- }
-
- if (port->type == PORT_VR41XX_SIU)
- vr41xx_select_siu_interface(SIU_INTERFACE_RS232C);
-
- if (options != NULL)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, con, baud, parity, bits, flow);
-}
-
-static struct uart_driver siu_uart_driver;
-
-static struct console siu_console = {
- .name = "ttyVR",
- .write = siu_console_write,
- .device = uart_console_device,
- .setup = siu_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &siu_uart_driver,
-};
-
-static int siu_console_init(void)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- }
-
- register_console(&siu_console);
-
- return 0;
-}
-
-console_initcall(siu_console_init);
-
-void __init vr41xx_siu_early_setup(struct uart_port *port)
-{
- if (port->type == PORT_UNKNOWN)
- return;
-
- siu_uart_ports[port->line].line = port->line;
- siu_uart_ports[port->line].type = port->type;
- siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
- siu_uart_ports[port->line].mapbase = port->mapbase;
- siu_uart_ports[port->line].ops = &siu_uart_ops;
-}
-
-#define SERIAL_VR41XX_CONSOLE &siu_console
-#else
-#define SERIAL_VR41XX_CONSOLE NULL
-#endif
-
-static struct uart_driver siu_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "SIU",
- .dev_name = "ttyVR",
- .major = SIU_MAJOR,
- .minor = SIU_MINOR_BASE,
- .cons = SERIAL_VR41XX_CONSOLE,
-};
-
-static int siu_probe(struct platform_device *dev)
-{
- struct uart_port *port;
- int num, i, retval;
-
- num = siu_init_ports(dev);
- if (num <= 0)
- return -ENODEV;
-
- siu_uart_driver.nr = num;
- retval = uart_register_driver(&siu_uart_driver);
- if (retval)
- return retval;
-
- for (i = 0; i < num; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- port->dev = &dev->dev;
- port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
-
- retval = uart_add_one_port(&siu_uart_driver, port);
- if (retval < 0) {
- port->dev = NULL;
- break;
- }
- }
-
- if (i == 0 && retval < 0) {
- uart_unregister_driver(&siu_uart_driver);
- return retval;
- }
-
- return 0;
-}
-
-static int siu_remove(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if (port->dev == &dev->dev) {
- uart_remove_one_port(&siu_uart_driver, port);
- port->dev = NULL;
- }
- }
-
- uart_unregister_driver(&siu_uart_driver);
-
- return 0;
-}
-
-static int siu_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_suspend_port(&siu_uart_driver, port);
-
- }
-
- return 0;
-}
-
-static int siu_resume(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_resume_port(&siu_uart_driver, port);
- }
-
- return 0;
-}
-
-static struct platform_driver siu_device_driver = {
- .probe = siu_probe,
- .remove = siu_remove,
- .suspend = siu_suspend,
- .resume = siu_resume,
- .driver = {
- .name = "SIU",
- },
-};
-
-module_platform_driver(siu_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 595d8b49c745..5e287dedce01 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/minmax.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -104,6 +105,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p->size = size;
p->next = NULL;
p->commit = 0;
+ p->lookahead = 0;
p->read = 0;
p->flags = 0;
}
@@ -234,6 +236,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
buf->head = next;
}
buf->head->read = buf->head->commit;
+ buf->head->lookahead = buf->head->read;
if (ld && ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
@@ -276,13 +279,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
if (n != NULL) {
n->flags = flags;
buf->tail = n;
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs();
+ * ensures they see all buffer data.
*/
smp_store_release(&b->commit, b->used);
- /* paired w/ acquire in flush_to_ldisc(); ensures the
- * latest commit value can be read before the head is
- * advanced to the next buffer
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs();
+ * ensures the latest commit value can be read before the head
+ * is advanced to the next buffer.
*/
smp_store_release(&b->next, n);
} else if (change)
@@ -459,6 +464,44 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
+static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
+{
+ head->lookahead = max(head->lookahead, head->read);
+
+ while (head) {
+ struct tty_buffer *next;
+ unsigned int count;
+
+ /*
+ * Paired w/ release in __tty_buffer_request_room();
+ * ensures commit value read is not stale if the head
+ * is advancing to the next buffer.
+ */
+ next = smp_load_acquire(&head->next);
+ /*
+ * Paired w/ release in __tty_buffer_request_room() or in
+ * tty_buffer_flush(); ensures we see the committed buffer data.
+ */
+ count = smp_load_acquire(&head->commit) - head->lookahead;
+ if (!count) {
+ head = next;
+ continue;
+ }
+
+ if (port->client_ops->lookahead_buf) {
+ unsigned char *p, *f = NULL;
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ }
+
+ head->lookahead += count;
+ }
+}
+
static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
@@ -496,7 +539,7 @@ static void flush_to_ldisc(struct work_struct *work)
while (1) {
struct tty_buffer *head = buf->head;
struct tty_buffer *next;
- int count;
+ int count, rcvd;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
@@ -519,10 +562,12 @@ static void flush_to_ldisc(struct work_struct *work)
continue;
}
- count = receive_buf(port, head, count);
- if (!count)
+ rcvd = receive_buf(port, head, count);
+ head->read += rcvd;
+ if (rcvd < count)
+ lookahead_bufs(port, head);
+ if (!rcvd)
break;
- head->read += count;
if (need_resched())
cond_resched();
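
The lookahead machinery lets committed-but-unread tty buffer data be scanned
while the reader is throttled: flush_to_ldisc() now advances head->read only
by what receive_buf() actually consumed and, on a short count, walks the
remaining buffers through lookahead_bufs(), presumably so flow-control
characters are noticed even while reads are stalled. A port client op could
look roughly like this (the foo_* names are invented; n_tty provides the real
consumer):

#include <linux/tty.h>

static void foo_throttle_tx(struct tty_port *port) { /* stop transmitter */ }
static void foo_resume_tx(struct tty_port *port) { /* restart transmitter */ }

/* Peek at not-yet-consumed bytes for XOFF/XON without consuming them. */
static void foo_lookahead_buf(struct tty_port *port, const unsigned char *cp,
			      const unsigned char *fp, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (fp && fp[i] != TTY_NORMAL)
			continue;		/* skip error-flagged bytes */
		if (cp[i] == 0x13)		/* XOFF */
			foo_throttle_tx(port);
		else if (cp[i] == 0x11)		/* XON */
			foo_resume_tx(port);
	}
}
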
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8fec1d8648f5..82a8855981f7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1663,7 +1663,7 @@ void tty_kclose(struct tty_struct *tty)
*/
tty_ldisc_release(tty);
- /* Wait for pending work before tty destruction commmences */
+ /* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
tty_debug_hangup(tty, "freeing structure\n");
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index adae687f654b..2a76b330e108 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -319,6 +319,8 @@ unsigned char tty_get_frame_size(unsigned int cflag)
bits++;
if (cflag & PARENB)
bits++;
+ if (cflag & ADDRB)
+ bits++;
return bits;
}
@@ -353,6 +355,8 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
old_termios = tty->termios;
tty->termios = *new_termios;
unset_locked_termios(tty, &old_termios);
+ /* Reset any ADDRB changes, ADDRB is changed through ->rs485_config() */
+ tty->termios.c_cflag ^= (tty->termios.c_cflag ^ old_termios.c_cflag) & ADDRB;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
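
With ADDRB counted, tty_get_frame_size() reports the full on-wire frame
length: 2 bits for start + stop, the data bits, and one more each for CSTOPB,
PARENB, and now the 9th address bit, which matters wherever frame time feeds
timeout calculations. For example:

#include <linux/tty.h>

static unsigned int foo_bits_per_frame(void)
{
	/* 1 start + 8 data + 1 parity + 1 address + 1 stop = 12 bits */
	return tty_get_frame_size(CS8 | PARENB | ADDRB);
}
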
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 880608a65773..dce08a6d7b5e 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -43,6 +43,26 @@ static int tty_port_default_receive_buf(struct tty_port *port,
return ret;
}
+static void tty_port_default_lookahead_buf(struct tty_port *port, const unsigned char *p,
+ const unsigned char *f, unsigned int count)
+{
+ struct tty_struct *tty;
+ struct tty_ldisc *disc;
+
+ tty = READ_ONCE(port->itty);
+ if (!tty)
+ return;
+
+ disc = tty_ldisc_ref(tty);
+ if (!disc)
+ return;
+
+ if (disc->ops->lookahead_buf)
+ disc->ops->lookahead_buf(disc->tty, p, f, count);
+
+ tty_ldisc_deref(disc);
+}
+
static void tty_port_default_wakeup(struct tty_port *port)
{
struct tty_struct *tty = tty_port_tty_get(port);
@@ -55,6 +75,7 @@ static void tty_port_default_wakeup(struct tty_port *port)
const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
+ .lookahead_buf = tty_port_default_lookahead_buf,
.write_wakeup = tty_port_default_wakeup,
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index fe30ce512819..b3dfe9d5717e 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -30,6 +30,6 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
ifdef GENERATE_KEYMAP
$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@
+ loadkeys --mktable --unicode $< > $@
endif
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index d815ac98b39e..f02d21e2a96e 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -23,6 +23,8 @@
* stack overflow.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/errno.h>
@@ -36,9 +38,9 @@
#include <linux/vt_kern.h>
#include <linux/string.h>
-static unsigned short translations[][256] = {
+static unsigned short translations[][E_TABSZ] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
- {
+ [LAT1_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -71,9 +73,9 @@ static unsigned short translations[][256] = {
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
- },
+ },
/* VT100 graphics mapped to Unicode */
- {
+ [GRAF_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -108,8 +110,8 @@ static unsigned short translations[][256] = {
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* IBM Codepage 437 mapped to Unicode */
- {
- 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+ [IBMPC_MAP] = {
+ 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
@@ -141,9 +143,9 @@ static unsigned short translations[][256] = {
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
- },
+ },
/* User mapping -- default to codes for direct font mapping */
- {
+ [USER_MAP] = {
0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
@@ -184,78 +186,105 @@ static unsigned short translations[][256] = {
#define MAX_GLYPH 512 /* Max possible glyph value */
-static int inv_translate[MAX_NR_CONSOLES];
+static enum translation_map inv_translate[MAX_NR_CONSOLES];
+
+#define UNI_DIRS 32U
+#define UNI_DIR_ROWS 32U
+#define UNI_ROW_GLYPHS 64U
+
+#define UNI_DIR_BITS GENMASK(15, 11)
+#define UNI_ROW_BITS GENMASK(10, 6)
+#define UNI_GLYPH_BITS GENMASK( 5, 0)
-struct uni_pagedir {
- u16 **uni_pgdir[32];
+#define UNI_DIR(uni) FIELD_GET(UNI_DIR_BITS, (uni))
+#define UNI_ROW(uni) FIELD_GET(UNI_ROW_BITS, (uni))
+#define UNI_GLYPH(uni) FIELD_GET(UNI_GLYPH_BITS, (uni))
+
+#define UNI(dir, row, glyph) (FIELD_PREP(UNI_DIR_BITS, (dir)) | \
+ FIELD_PREP(UNI_ROW_BITS, (row)) | \
+ FIELD_PREP(UNI_GLYPH_BITS, (glyph)))
+
+/**
+ * struct uni_pagedict -- unicode directory
+ *
+ * @uni_pgdir: 32*32*64 table with glyphs
+ * @refcount: reference count of this structure
+ * @sum: checksum
+ * @inverse_translations: best-effort inverse mapping
+ * @inverse_trans_unicode: best-effort inverse mapping to unicode
+ */
+struct uni_pagedict {
+ u16 **uni_pgdir[UNI_DIRS];
unsigned long refcount;
unsigned long sum;
- unsigned char *inverse_translations[4];
+ unsigned char *inverse_translations[LAST_MAP + 1];
u16 *inverse_trans_unicode;
};
-static struct uni_pagedir *dflt;
+static struct uni_pagedict *dflt;
-static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
+static void set_inverse_transl(struct vc_data *conp, struct uni_pagedict *dict,
+ enum translation_map m)
{
- int j, glyph;
- unsigned short *t = translations[i];
- unsigned char *q;
-
- if (!p) return;
- q = p->inverse_translations[i];
-
- if (!q) {
- q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL);
- if (!q) return;
+ unsigned short *t = translations[m];
+ unsigned char *inv;
+
+ if (!dict)
+ return;
+ inv = dict->inverse_translations[m];
+
+ if (!inv) {
+ inv = dict->inverse_translations[m] = kmalloc(MAX_GLYPH,
+ GFP_KERNEL);
+ if (!inv)
+ return;
}
- memset(q, 0, MAX_GLYPH);
+ memset(inv, 0, MAX_GLYPH);
- for (j = 0; j < E_TABSZ; j++) {
- glyph = conv_uni_to_pc(conp, t[j]);
- if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
+ for (unsigned int ch = 0; ch < ARRAY_SIZE(translations[m]); ch++) {
+ int glyph = conv_uni_to_pc(conp, t[ch]);
+ if (glyph >= 0 && glyph < MAX_GLYPH && inv[glyph] < 32) {
/* prefer '-' above SHY etc. */
- q[glyph] = j;
+ inv[glyph] = ch;
}
}
}
-static void set_inverse_trans_unicode(struct vc_data *conp,
- struct uni_pagedir *p)
+static void set_inverse_trans_unicode(struct uni_pagedict *dict)
{
- int i, j, k, glyph;
- u16 **p1, *p2;
- u16 *q;
-
- if (!p) return;
- q = p->inverse_trans_unicode;
- if (!q) {
- q = p->inverse_trans_unicode =
- kmalloc_array(MAX_GLYPH, sizeof(u16), GFP_KERNEL);
- if (!q)
+ unsigned int d, r, g;
+ u16 *inv;
+
+ if (!dict)
+ return;
+
+ inv = dict->inverse_trans_unicode;
+ if (!inv) {
+ inv = dict->inverse_trans_unicode = kmalloc_array(MAX_GLYPH,
+ sizeof(*inv), GFP_KERNEL);
+ if (!inv)
return;
}
- memset(q, 0, MAX_GLYPH * sizeof(u16));
+ memset(inv, 0, MAX_GLYPH * sizeof(*inv));
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (!p1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
continue;
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (!p2)
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
continue;
- for (k = 0; k < 64; k++) {
- glyph = p2[k];
- if (glyph >= 0 && glyph < MAX_GLYPH
- && q[glyph] < 32)
- q[glyph] = (i << 11) + (j << 6) + k;
+ for (g = 0; g < UNI_ROW_GLYPHS; g++) {
+ u16 glyph = row[g];
+ if (glyph < MAX_GLYPH && inv[glyph] < 32)
+ inv[glyph] = UNI(d, r, g);
}
}
}
}
-unsigned short *set_translate(int m, struct vc_data *vc)
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
return translations[m];
@@ -268,44 +297,45 @@ unsigned short *set_translate(int m, struct vc_data *vc)
* was active.
* Still, it is now possible to a certain extent to cut and paste non-ASCII.
*/
-u16 inverse_translate(const struct vc_data *conp, int glyph, int use_unicode)
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode)
{
- struct uni_pagedir *p;
- int m;
- if (glyph < 0 || glyph >= MAX_GLYPH)
+ struct uni_pagedict *p;
+ enum translation_map m;
+
+ if (glyph >= MAX_GLYPH)
return 0;
- else {
- p = *conp->vc_uni_pagedir_loc;
- if (!p)
+
+ p = *conp->uni_pagedict_loc;
+ if (!p)
+ return glyph;
+
+ if (use_unicode) {
+ if (!p->inverse_trans_unicode)
return glyph;
- else if (use_unicode) {
- if (!p->inverse_trans_unicode)
- return glyph;
- else
- return p->inverse_trans_unicode[glyph];
- } else {
- m = inv_translate[conp->vc_num];
- if (!p->inverse_translations[m])
- return glyph;
- else
- return p->inverse_translations[m][glyph];
- }
+
+ return p->inverse_trans_unicode[glyph];
}
+
+ m = inv_translate[conp->vc_num];
+ if (!p->inverse_translations[m])
+ return glyph;
+
+ return p->inverse_translations[m][glyph];
}
EXPORT_SYMBOL_GPL(inverse_translate);
static void update_user_maps(void)
{
int i;
- struct uni_pagedir *p, *q = NULL;
-
+ struct uni_pagedict *p, *q = NULL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
- p = *vc_cons[i].d->vc_uni_pagedir_loc;
+ p = *vc_cons[i].d->uni_pagedict_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
- set_inverse_trans_unicode(vc_cons[i].d, p);
+ set_inverse_trans_unicode(p);
q = p;
}
}
@@ -321,15 +351,15 @@ static void update_user_maps(void)
*/
int con_set_trans_old(unsigned char __user * arg)
{
- int i;
unsigned short inbuf[E_TABSZ];
- unsigned char ubuf[E_TABSZ];
-
- if (copy_from_user(ubuf, arg, E_TABSZ))
- return -EFAULT;
+ unsigned int i;
+ unsigned char ch;
- for (i = 0; i < E_TABSZ ; i++)
- inbuf[i] = UNI_DIRECT_BASE | ubuf[i];
+ for (i = 0; i < ARRAY_SIZE(inbuf); i++) {
+ if (get_user(ch, &arg[i]))
+ return -EFAULT;
+ inbuf[i] = UNI_DIRECT_BASE | ch;
+ }
console_lock();
memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
@@ -345,7 +375,7 @@ int con_get_trans_old(unsigned char __user * arg)
unsigned char outbuf[E_TABSZ];
console_lock();
- for (i = 0; i < E_TABSZ ; i++)
+ for (i = 0; i < ARRAY_SIZE(outbuf); i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
outbuf[i] = (ch & ~0xff) ? 0 : ch;
@@ -381,7 +411,7 @@ int con_get_trans_new(ushort __user * arg)
}
/*
- * Unicode -> current font conversion
+ * Unicode -> current font conversion
*
* A font has at most 512 chars, usually 256.
* But one font position may represent several Unicode chars.
@@ -393,78 +423,82 @@ int con_get_trans_new(ushort __user * arg)
extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
extern u16 dfont_unitable[];
-static void con_release_unimap(struct uni_pagedir *p)
+static void con_release_unimap(struct uni_pagedict *dict)
{
- u16 **p1;
- int i, j;
-
- if (p == dflt) dflt = NULL;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1 != NULL) {
- for (j = 0; j < 32; j++)
- kfree(p1[j]);
- kfree(p1);
+ unsigned int d, r;
+
+ if (dict == dflt)
+ dflt = NULL;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (dir != NULL) {
+ for (r = 0; r < UNI_DIR_ROWS; r++)
+ kfree(dir[r]);
+ kfree(dir);
}
- p->uni_pgdir[i] = NULL;
+ dict->uni_pgdir[d] = NULL;
}
- for (i = 0; i < 4; i++) {
- kfree(p->inverse_translations[i]);
- p->inverse_translations[i] = NULL;
+
+ for (r = 0; r < ARRAY_SIZE(dict->inverse_translations); r++) {
+ kfree(dict->inverse_translations[r]);
+ dict->inverse_translations[r] = NULL;
}
- kfree(p->inverse_trans_unicode);
- p->inverse_trans_unicode = NULL;
+
+ kfree(dict->inverse_trans_unicode);
+ dict->inverse_trans_unicode = NULL;
}
/* Caller must hold the console lock */
void con_free_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
- p = *vc->vc_uni_pagedir_loc;
+ p = *vc->uni_pagedict_loc;
if (!p)
return;
- *vc->vc_uni_pagedir_loc = NULL;
+ *vc->uni_pagedict_loc = NULL;
if (--p->refcount)
return;
con_release_unimap(p);
kfree(p);
}
-
-static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
+
+static int con_unify_unimap(struct vc_data *conp, struct uni_pagedict *dict1)
{
- int i, j, k;
- struct uni_pagedir *q;
-
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- if (!vc_cons_allocated(i))
+ struct uni_pagedict *dict2;
+ unsigned int cons, d, r;
+
+ for (cons = 0; cons < MAX_NR_CONSOLES; cons++) {
+ if (!vc_cons_allocated(cons))
continue;
- q = *vc_cons[i].d->vc_uni_pagedir_loc;
- if (!q || q == p || q->sum != p->sum)
+ dict2 = *vc_cons[cons].d->uni_pagedict_loc;
+ if (!dict2 || dict2 == dict1 || dict2->sum != dict1->sum)
continue;
- for (j = 0; j < 32; j++) {
- u16 **p1, **q1;
- p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
- if (!p1 && !q1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir1 = dict1->uni_pgdir[d];
+ u16 **dir2 = dict2->uni_pgdir[d];
+ if (!dir1 && !dir2)
continue;
- if (!p1 || !q1)
+ if (!dir1 || !dir2)
break;
- for (k = 0; k < 32; k++) {
- if (!p1[k] && !q1[k])
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ if (!dir1[r] && !dir2[r])
continue;
- if (!p1[k] || !q1[k])
+ if (!dir1[r] || !dir2[r])
break;
- if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
+ if (memcmp(dir1[r], dir2[r], UNI_ROW_GLYPHS *
+ sizeof(*dir1[r])))
break;
}
- if (k < 32)
+ if (r < UNI_DIR_ROWS)
break;
}
- if (j == 32) {
- q->refcount++;
- *conp->vc_uni_pagedir_loc = q;
- con_release_unimap(p);
- kfree(p);
+ if (d == UNI_DIRS) {
+ dict2->refcount++;
+ *conp->uni_pagedict_loc = dict2;
+ con_release_unimap(dict1);
+ kfree(dict1);
return 1;
}
}
@@ -472,55 +506,66 @@ static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
}
static int
-con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
+con_insert_unipair(struct uni_pagedict *p, u_short unicode, u_short fontpos)
{
- int i, n;
- u16 **p1, *p2;
-
- p1 = p->uni_pgdir[n = unicode >> 11];
- if (!p1) {
- p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *),
- GFP_KERNEL);
- if (!p1) return -ENOMEM;
- for (i = 0; i < 32; i++)
- p1[i] = NULL;
+ u16 **dir, *row;
+ unsigned int n;
+
+ n = UNI_DIR(unicode);
+ dir = p->uni_pgdir[n];
+ if (!dir) {
+ dir = p->uni_pgdir[n] = kcalloc(UNI_DIR_ROWS, sizeof(*dir),
+ GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
}
- p2 = p1[n = (unicode >> 6) & 0x1f];
- if (!p2) {
- p2 = p1[n] = kmalloc_array(64, sizeof(u16), GFP_KERNEL);
- if (!p2) return -ENOMEM;
- memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
+ n = UNI_ROW(unicode);
+ row = dir[n];
+ if (!row) {
+ row = dir[n] = kmalloc_array(UNI_ROW_GLYPHS, sizeof(*row),
+ GFP_KERNEL);
+ if (!row)
+ return -ENOMEM;
+ /* No glyphs for the characters (yet) */
+ memset(row, 0xff, UNI_ROW_GLYPHS * sizeof(*row));
}
- p2[unicode & 0x3f] = fontpos;
-
+ row[UNI_GLYPH(unicode)] = fontpos;
+
p->sum += (fontpos << 20U) + unicode;
return 0;
}
+static int con_allocate_new(struct vc_data *vc)
+{
+ struct uni_pagedict *new, *old = *vc->uni_pagedict_loc;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ new->refcount = 1;
+ *vc->uni_pagedict_loc = new;
+
+ if (old)
+ old->refcount--;
+
+ return 0;
+}
+
/* Caller must hold the lock */
static int con_do_clear_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p, *q;
-
- p = *vc->vc_uni_pagedir_loc;
- if (!p || --p->refcount) {
- q = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!q) {
- if (p)
- p->refcount++;
- return -ENOMEM;
- }
- q->refcount=1;
- *vc->vc_uni_pagedir_loc = q;
- } else {
- if (p == dflt) dflt = NULL;
- p->refcount++;
- p->sum = 0;
- con_release_unimap(p);
- }
+ struct uni_pagedict *old = *vc->uni_pagedict_loc;
+
+ if (!old || old->refcount > 1)
+ return con_allocate_new(vc);
+
+ old->sum = 0;
+ con_release_unimap(old);
+
return 0;
}
@@ -532,91 +577,93 @@ int con_clear_unimap(struct vc_data *vc)
console_unlock();
return ret;
}
-
+
+static struct uni_pagedict *con_unshare_unimap(struct vc_data *vc,
+ struct uni_pagedict *old)
+{
+ struct uni_pagedict *new;
+ unsigned int d, r, g;
+ int ret;
+ u16 uni = 0;
+
+ ret = con_allocate_new(vc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new = *vc->uni_pagedict_loc;
+
+ /*
+ * uni_pgdir is a 32*32*64 table with rows allocated when its first
+ * entry is added. The unicode value must still be incremented for
+ * empty rows. We are copying entries from "old" to "new".
+ */
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = old->uni_pgdir[d];
+ if (!dir) {
+ /* Account for empty table */
+ uni += UNI_DIR_ROWS * UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row) {
+ /* Account for row of 64 empty entries */
+ uni += UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, uni++) {
+ if (row[g] == 0xffff)
+ continue;
+ /*
+ * Found one, copy entry for unicode uni with
+ * fontpos value row[g].
+ */
+ ret = con_insert_unipair(new, uni, row[g]);
+ if (ret) {
+ old->refcount++;
+ *vc->uni_pagedict_loc = old;
+ con_release_unimap(new);
+ kfree(new);
+ return ERR_PTR(ret);
+ }
+ }
+ }
+ }
+
+ return new;
+}
+
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
- int err = 0, err1, i;
- struct uni_pagedir *p, *q;
+ int err = 0, err1;
+ struct uni_pagedict *dict;
struct unipair *unilist, *plist;
if (!ct)
return 0;
- unilist = vmemdup_user(list, array_size(sizeof(struct unipair), ct));
+ unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
console_lock();
/* Save original vc_uni_pagedir_loc in case we allocate a new one */
- p = *vc->vc_uni_pagedir_loc;
-
- if (!p) {
+ dict = *vc->uni_pagedict_loc;
+ if (!dict) {
err = -EINVAL;
-
goto out_unlock;
}
-
- if (p->refcount > 1) {
- int j, k;
- u16 **p1, *p2, l;
-
- err1 = con_do_clear_unimap(vc);
- if (err1) {
- err = err1;
+
+ if (dict->refcount > 1) {
+ dict = con_unshare_unimap(vc, dict);
+ if (IS_ERR(dict)) {
+ err = PTR_ERR(dict);
goto out_unlock;
}
-
- /*
- * Since refcount was > 1, con_clear_unimap() allocated a
- * a new uni_pagedir for this vc. Re: p != q
- */
- q = *vc->vc_uni_pagedir_loc;
-
- /*
- * uni_pgdir is a 32*32*64 table with rows allocated
- * when its first entry is added. The unicode value must
- * still be incremented for empty rows. We are copying
- * entries from "p" (old) to "q" (new).
- */
- l = 0; /* unicode value */
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (p2) {
- for (k = 0; k < 64; k++, l++)
- if (p2[k] != 0xffff) {
- /*
- * Found one, copy entry for unicode
- * l with fontpos value p2[k].
- */
- err1 = con_insert_unipair(q, l, p2[k]);
- if (err1) {
- p->refcount++;
- *vc->vc_uni_pagedir_loc = p;
- con_release_unimap(q);
- kfree(q);
- err = err1;
- goto out_unlock;
- }
- }
- } else {
- /* Account for row of 64 empty entries */
- l += 64;
- }
- }
- else
- /* Account for empty table */
- l += 32 * 64;
- }
-
- /*
- * Finished copying font table, set vc_uni_pagedir to new table
- */
- p = q;
- } else if (p == dflt) {
+ } else if (dict == dflt) {
dflt = NULL;
}
@@ -624,20 +671,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
* Insert user specified unicode pairs into new table.
*/
for (plist = unilist; ct; ct--, plist++) {
- err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+ err1 = con_insert_unipair(dict, plist->unicode, plist->fontpos);
if (err1)
err = err1;
}
-
+
/*
* Merge with fontmaps of any other virtual consoles.
*/
- if (con_unify_unimap(vc, p))
+ if (con_unify_unimap(vc, dict))
goto out_unlock;
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update inverse translations */
- set_inverse_trans_unicode(vc, p);
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
out_unlock:
console_unlock();
@@ -652,55 +699,56 @@ out_unlock:
* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
* The representation used was the most compact I could come up
* with. This routine is executed at video setup, and when the
- * PIO_FONTRESET ioctl is called.
+ * PIO_FONTRESET ioctl is called.
*
* The caller must hold the console lock
*/
int con_set_default_unimap(struct vc_data *vc)
{
- int i, j, err = 0, err1;
- u16 *q;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
+ unsigned int fontpos, count;
+ int err = 0, err1;
+ u16 *dfont;
if (dflt) {
- p = *vc->vc_uni_pagedir_loc;
- if (p == dflt)
+ dict = *vc->uni_pagedict_loc;
+ if (dict == dflt)
return 0;
dflt->refcount++;
- *vc->vc_uni_pagedir_loc = dflt;
- if (p && !--p->refcount) {
- con_release_unimap(p);
- kfree(p);
+ *vc->uni_pagedict_loc = dflt;
+ if (dict && !--dict->refcount) {
+ con_release_unimap(dict);
+ kfree(dict);
}
return 0;
}
-
+
/* The default font is always 256 characters */
err = con_do_clear_unimap(vc);
if (err)
return err;
-
- p = *vc->vc_uni_pagedir_loc;
- q = dfont_unitable;
-
- for (i = 0; i < 256; i++)
- for (j = dfont_unicount[i]; j; j--) {
- err1 = con_insert_unipair(p, *(q++), i);
+
+ dict = *vc->uni_pagedict_loc;
+ dfont = dfont_unitable;
+
+ for (fontpos = 0; fontpos < 256U; fontpos++)
+ for (count = dfont_unicount[fontpos]; count; count--) {
+ err1 = con_insert_unipair(dict, *(dfont++), fontpos);
if (err1)
err = err1;
}
-
- if (con_unify_unimap(vc, p)) {
- dflt = *vc->vc_uni_pagedir_loc;
+
+ if (con_unify_unimap(vc, dict)) {
+ dflt = *vc->uni_pagedict_loc;
return err;
}
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update all inverse translations */
- set_inverse_trans_unicode(vc, p);
- dflt = p;
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
+ dflt = dict;
return err;
}
EXPORT_SYMBOL(con_set_default_unimap);
@@ -714,16 +762,16 @@ EXPORT_SYMBOL(con_set_default_unimap);
*/
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
{
- struct uni_pagedir *q;
+ struct uni_pagedict *src;
- if (!*src_vc->vc_uni_pagedir_loc)
+ if (!*src_vc->uni_pagedict_loc)
return -EINVAL;
- if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc)
+ if (*dst_vc->uni_pagedict_loc == *src_vc->uni_pagedict_loc)
return 0;
con_free_unimap(dst_vc);
- q = *src_vc->vc_uni_pagedir_loc;
- q->refcount++;
- *dst_vc->vc_uni_pagedir_loc = q;
+ src = *src_vc->uni_pagedict_loc;
+ src->refcount++;
+ *dst_vc->uni_pagedict_loc = src;
return 0;
}
EXPORT_SYMBOL(con_copy_unimap);
@@ -734,46 +782,53 @@ EXPORT_SYMBOL(con_copy_unimap);
* Read the console unicode data for this console. Called from the ioctl
* handlers.
*/
-int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct,
+ struct unipair __user *list)
{
- int i, j, k, ret = 0;
ushort ect;
- u16 **p1, *p2;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
struct unipair *unilist;
+ unsigned int d, r, g;
+ int ret = 0;
- unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ unilist = kvmalloc_array(ct, sizeof(*unilist), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
console_lock();
ect = 0;
- if (*vc->vc_uni_pagedir_loc) {
- p = *vc->vc_uni_pagedir_loc;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = *(p1++);
- if (p2)
- for (k = 0; k < 64; k++, p2++) {
- if (*p2 >= MAX_GLYPH)
- continue;
- if (ect < ct) {
- unilist[ect].unicode =
- (i<<11)+(j<<6)+k;
- unilist[ect].fontpos = *p2;
- }
- ect++;
+ dict = *vc->uni_pagedict_loc;
+ if (!dict)
+ goto unlock;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
+ continue;
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
+ continue;
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, row++) {
+ if (*row >= MAX_GLYPH)
+ continue;
+ if (ect < ct) {
+ unilist[ect].unicode = UNI(d, r, g);
+ unilist[ect].fontpos = *row;
}
+ ect++;
}
}
}
+unlock:
console_unlock();
- if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
+ if (copy_to_user(list, unilist, min(ect, ct) * sizeof(*unilist)))
+ ret = -EFAULT;
+ if (put_user(ect, uct))
ret = -EFAULT;
- put_user(ect, uct);
kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
@@ -798,20 +853,18 @@ u32 conv_8bit_to_uni(unsigned char c)
int conv_uni_to_8bit(u32 uni)
{
int c;
- for (c = 0; c < 0x100; c++)
+ for (c = 0; c < ARRAY_SIZE(translations[USER_MAP]); c++)
if (translations[USER_MAP][c] == uni ||
(translations[USER_MAP][c] == (c | 0xf000) && uni == c))
return c;
return -1;
}
-int
-conv_uni_to_pc(struct vc_data *conp, long ucs)
+int conv_uni_to_pc(struct vc_data *conp, long ucs)
{
- int h;
- u16 **p1, *p2;
- struct uni_pagedir *p;
-
+ struct uni_pagedict *dict;
+ u16 **dir, *row, glyph;
+
/* Only 16-bit codes supported at this time */
if (ucs > 0xffff)
return -4; /* Not found */
@@ -826,17 +879,24 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
*/
else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
return ucs & UNI_DIRECT_MASK;
-
- if (!*conp->vc_uni_pagedir_loc)
+
+ dict = *conp->uni_pagedict_loc;
+ if (!dict)
return -3;
- p = *conp->vc_uni_pagedir_loc;
- if ((p1 = p->uni_pgdir[ucs >> 11]) &&
- (p2 = p1[(ucs >> 6) & 0x1f]) &&
- (h = p2[ucs & 0x3f]) < MAX_GLYPH)
- return h;
+ dir = dict->uni_pgdir[UNI_DIR(ucs)];
+ if (!dir)
+ return -4;
- return -4; /* not found */
+ row = dir[UNI_ROW(ucs)];
+ if (!row)
+ return -4;
+
+ glyph = row[UNI_GLYPH(ucs)];
+ if (glyph >= MAX_GLYPH)
+ return -4;
+
+ return glyph;
}
/*
@@ -844,13 +904,13 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
* initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
* from this function, hence the call from sys_setup.
*/
-void __init
+void __init
console_map_init(void)
{
int i;
-
+
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc)
+ if (vc_cons_allocated(i) && !*vc_cons[i].d->uni_pagedict_loc)
con_set_default_unimap(vc_cons[i].d);
}
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index 094d95bf0005..0c043e4f292e 100644
--- a/drivers/tty/vt/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Do not edit this file! It was automatically generated by */
-/* loadkeys --mktable defkeymap.map > defkeymap.c */
+/* Do not edit this file! It was automatically generated by */
+/* loadkeys --mktable --unicode defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
@@ -139,7 +139,7 @@ static unsigned short ctrl_alt_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-ushort *key_maps[MAX_NR_KEYMAPS] = {
+unsigned short *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, altgr_map, NULL,
ctrl_map, shift_ctrl_map, NULL, NULL,
alt_map, NULL, NULL, NULL,
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index f7755e73696e..6ef22f01cc51 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -68,7 +68,8 @@ sel_pos(int n, bool unicode)
{
if (unicode)
return screen_glyph_unicode(vc_sel.cons, n / 2);
- return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n), 0);
+ return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n),
+ false);
}
/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index dfc1f4b445f3..0b669c82ddc9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -344,7 +344,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = vmalloc(memsize);
+ p = vzalloc(memsize);
if (!p)
return NULL;
@@ -1063,10 +1063,10 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
- if (vc->vc_uni_pagedir_loc)
+ if (vc->uni_pagedict_loc)
con_free_unimap(vc);
- vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
- vc->vc_uni_pagedir = NULL;
+ vc->uni_pagedict_loc = &vc->uni_pagedict;
+ vc->uni_pagedict = NULL;
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
@@ -1136,7 +1136,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
visual_init(vc, currcons, 1);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
err = -EINVAL;
@@ -3939,7 +3939,7 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
bind = con_is_bound(con->con);
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%i\n", bind);
+ return sysfs_emit(buf, "%i\n", bind);
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
@@ -3947,7 +3947,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
{
struct con_driver *con = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s %s\n",
+ return sysfs_emit(buf, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
con->desc);
@@ -4662,9 +4662,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
- else if (vc->vc_sw->con_font_set)
+ else if (vc->vc_sw->con_font_set) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
- else
+ } else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
@@ -4691,9 +4693,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
console_unlock();
return -EINVAL;
}
- if (vc->vc_sw->con_font_default)
+ if (vc->vc_sw->con_font_default) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
- else
+ } else
rc = -ENOSYS;
console_unlock();
if (!rc) {
@@ -4741,7 +4745,7 @@ u32 screen_glyph_unicode(const struct vc_data *vc, int n)
if (uniscr)
return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols];
- return inverse_translate(vc, screen_glyph(vc, n * 2), 1);
+ return inverse_translate(vc, screen_glyph(vc, n * 2), true);
}
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3bc0709a5dc2..a202d7d5240d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8326,6 +8326,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
+ .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
@@ -8740,6 +8741,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
+ unsigned long deadline;
+ int32_t remaining;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -8772,9 +8775,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks, hence set the RQF_PM flag so that it doesn't resume the
* already suspended children.
*/
+ deadline = jiffies + 10 * HZ;
for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ remaining / HZ, 0, 0, RQF_PM, NULL);
if (!scsi_status_is_check_condition(ret) ||
!scsi_sense_valid(&sshdr) ||
sshdr.sense_key != UNIT_ATTENTION)
@@ -9508,12 +9516,8 @@ EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_shutdown(struct ufs_hba *hba)
{
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
- goto out;
+ ufshcd_suspend(hba);
- pm_runtime_get_sync(hba->dev);
-
- ufshcd_suspend(hba);
-out:
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
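The retry loop above budgets the total time spent across all attempts instead of granting each attempt a fresh fixed timeout. A hedged sketch of the pattern, with do_request() as a hypothetical stand-in for scsi_execute():

/* Hedged sketch: bound the cumulative runtime of a retry loop with a
 * jiffies deadline; each attempt gets only the remaining budget.
 */
unsigned long deadline = jiffies + 10 * HZ;
int retries, ret = -ETIMEDOUT;

for (retries = 3; retries > 0; --retries) {
	long remaining = (long)(deadline - jiffies);

	if (remaining <= 0)
		break;				/* budget exhausted */
	ret = do_request(remaining / HZ);	/* hypothetical helper */
	if (ret != -EAGAIN)
		break;
}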
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index eced97538082..c3628a8645a5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1711,7 +1711,7 @@ static struct exynos_ufs_uic_attr fsd_uic_attr = {
.pa_dbg_option_suite = 0x2E820183,
};
-struct exynos_ufs_drv_data fsd_ufs_drvs = {
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.uic_attr = &fsd_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 24af1f389bf2..1c91f43e15c8 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -24,7 +24,7 @@ struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
-enum {
+enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
@@ -42,6 +42,15 @@ static const guid_t intel_dsm_guid =
GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+static bool __intel_dsm_supported(struct intel_host *host,
+ enum intel_ufs_dsm_func_id fn)
+{
+ return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
+}
+
+#define INTEL_DSM_SUPPORTED(host, name) \
+ __intel_dsm_supported(host, INTEL_DSM_##name)
+
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
@@ -71,7 +80,7 @@ out:
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
- if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ if (!__intel_dsm_supported(intel_host, fn))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
@@ -300,7 +309,7 @@ static int ufs_intel_device_reset(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
u32 result = 0;
int err;
@@ -342,7 +351,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
if (hba->vops->device_reset)
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
} else {
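The helper above also fixes a subtle bug visible in the removed lines: `host->dsm_fns & INTEL_DSM_RESET` masks with the enum value (1), which tests bit 0 (INTEL_DSM_FNS) rather than bit 1. Since the _DSM "supported functions" word is a bitmask indexed by function number, the test must use the bit position; a minimal sketch of the corrected idiom:

/* Hedged sketch: function N of a _DSM function bitmask is bit N, so it
 * must be tested with (1u << fn), never by masking with fn itself.
 */
static bool dsm_supported(u32 dsm_fns, unsigned int fn)
{
	return fn < 32 && (dsm_fns & (1u << fn));
}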
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d21b69997e75..5adcb349718c 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1530,7 +1530,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 &&
- le32_to_cpu(trb->control) & TRB_SMM)
+ le32_to_cpu(trb->control) & TRB_SMM &&
+ le32_to_cpu(trb->control) & TRB_CHAIN)
transfer_end = true;
cdns3_ep_inc_deq(priv_ep);
@@ -1690,6 +1691,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
diff --git a/drivers/usb/chipidea/trace.h b/drivers/usb/chipidea/trace.h
index 1601fd86c4c1..ca0e65b48f0a 100644
--- a/drivers/usb/chipidea/trace.h
+++ b/drivers/usb/chipidea/trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(ci_log,
TP_ARGS(ci, vaf),
TP_STRUCT__entry(
__string(name, dev_name(ci->dev))
- __dynamic_array(char, msg, CHIPIDEA_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(ci->dev));
- vsnprintf(__get_str(msg), CHIPIDEA_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
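The __vstring()/__assign_vstr() pair reserves the ring-buffer entry at the exact formatted length of the message, replacing the fixed CHIPIDEA_MSG_MAX scratch array (the same conversion appears below for xhci, mtu3 and musb). A hedged sketch of the idiom inside such an event definition:

/* Hedged sketch of the dynamic-string tracepoint idiom. */
TP_STRUCT__entry(
	__vstring(msg, vaf->fmt, vaf->va)	/* sized at trace time */
),
TP_fast_assign(
	__assign_vstr(msg, vaf->fmt, vaf->va);	/* formats into the entry */
),
TP_printk("%s", __get_str(msg))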
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 483bcb1213f7..cc637c4599e1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1810,6 +1810,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Series */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a6a87c5d1b05..94b305bbd621 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1251,7 +1251,8 @@ void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
/*
- * Some usb host controllers can only perform dma using a small SRAM area.
+ * Some usb host controllers can only perform dma using a small SRAM area,
+ * or have restrictions on addressable DRAM.
* The usb core itself is however optimized for host controllers that can dma
* using regular system memory - like pci devices doing bus mastering.
*
@@ -3127,8 +3128,18 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
if (IS_ERR(hcd->localmem_pool))
return PTR_ERR(hcd->localmem_pool);
- local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
- size, MEMREMAP_WC);
+ /*
+ * If a physical SRAM address was passed, map it; otherwise
+ * allocate system memory as a buffer.
+ */
+ if (phys_addr)
+ local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
+ size, MEMREMAP_WC);
+ else
+ local_mem = dmam_alloc_attrs(hcd->self.sysdev, size, &dma,
+ GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
if (IS_ERR(local_mem))
return PTR_ERR(local_mem);
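With this hunk, callers select the backing store through the phys_addr argument: a real SRAM address is memremapped, while zero requests a write-combined DMA bounce buffer from system memory. A hedged usage sketch (sram_phys, sram_dma and the sizes are illustrative):

/* Hedged usage sketch of usb_hcd_setup_local_mem(). */

/* controller with dedicated SRAM: map the window as local memory */
ret = usb_hcd_setup_local_mem(hcd, sram_phys, sram_dma, SZ_128K);

/* controller with DMA addressing limits: allocate a bounce buffer */
ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);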
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2633acde7ac1..bbab424b0d55 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6038,6 +6038,11 @@ re_enumerate:
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -6071,6 +6076,10 @@ int usb_reset_device(struct usb_device *udev)
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -6135,6 +6144,7 @@ int usb_reset_device(struct usb_device *udev)
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c8ba87df7abe..fd0ccf6f3ec5 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -154,9 +154,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_power_on(hsotg->phy);
+ ret = phy_init(hsotg->phy);
if (ret == 0)
- ret = phy_init(hsotg->phy);
+ ret = phy_power_on(hsotg->phy);
}
return ret;
@@ -188,9 +188,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_exit(hsotg->phy);
+ ret = phy_power_off(hsotg->phy);
if (ret == 0)
- ret = phy_power_off(hsotg->phy);
+ ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
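Both hunks restore the ordering contract of the generic PHY framework: phy_init() before phy_power_on(), and the exact mirror on teardown. A minimal sketch of the expected pairing:

/* Hedged sketch of the generic PHY ordering the patch restores. */
static int phy_bringup(struct phy *phy)
{
	int ret = phy_init(phy);		/* init first... */

	if (!ret)
		ret = phy_power_on(phy);	/* ...then power on */
	return ret;
}

static int phy_teardown(struct phy *phy)
{
	int ret = phy_power_off(phy);		/* reverse on the way down */

	if (!ret)
		ret = phy_exit(phy);
	return ret;
}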
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c5c238ab3083..d0237b30c9be 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -833,15 +833,16 @@ static void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
dwc3_clk_disable(dwc);
reset_control_assert(dwc->reset);
}
@@ -1751,12 +1752,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- if (!dwc->sysdev_is_parent) {
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
- }
-
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
@@ -1821,7 +1816,13 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
- device_init_wakeup(&pdev->dev, of_property_read_bool(dev->of_node, "wakeup-source"));
+
+ if (!dwc->sysdev_is_parent &&
+ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_clks;
+ }
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);
@@ -1879,16 +1880,16 @@ err5:
dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1983,7 +1984,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
dwc3_core_exit(dwc);
break;
}
@@ -2044,7 +2045,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
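The distinction behind the two one-line fixes above: device_can_wakeup() only reports that the hardware is capable of system wakeup, while device_may_wakeup() additionally requires that userspace enabled it via the power/wakeup sysfs attribute. Suspend paths should act on the policy, not the bare capability; a hedged sketch:

/* Hedged sketch: gate the full power-down on user wakeup policy. */
if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
	dwc3_core_exit(dwc);	/* nothing may wake us: power down fully */
	break;
}
/* otherwise keep enough of the controller alive to signal wakeup */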
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6b018048fe2e..4ee4ca09873a 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -44,6 +44,7 @@
#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPL 0x460e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
@@ -456,6 +457,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index c5e482f53e9d..d3f3937d7005 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/pm_domain.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
@@ -299,11 +298,24 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
icc_put(qcom->icc_path_apps);
}
+/* Only usable in contexts where the role can not change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ return dwc->xhci;
+}
+
static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
{
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
- struct usb_hcd *hcd = platform_get_drvdata(dwc->xhci);
struct usb_device *udev;
+ struct usb_hcd __maybe_unused *hcd;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ hcd = platform_get_drvdata(dwc->xhci);
/*
* It is possible to query the speed of all children of
@@ -311,8 +323,11 @@ static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
* currently supports only 1 port per controller. So
* this is sufficient.
*/
+#ifdef CONFIG_USB
udev = usb_hub_find_child(hcd->self.root_hub, 1);
-
+#else
+ udev = NULL;
+#endif
if (!udev)
return USB_SPEED_UNKNOWN;
@@ -387,7 +402,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
}
-static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
+static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
{
u32 val;
int i, ret;
@@ -406,7 +421,11 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
if (ret)
dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
- if (device_may_wakeup(qcom->dev)) {
+ /*
+ * The role is stable during suspend as role switching is done from a
+ * freezable workqueue.
+ */
+ if (dwc3_qcom_is_host(qcom) && wakeup) {
qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
dwc3_qcom_enable_interrupts(qcom);
}
@@ -416,7 +435,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
return 0;
}
-static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
+static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
{
int ret;
int i;
@@ -424,7 +443,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
if (!qcom->is_suspended)
return 0;
- if (device_may_wakeup(qcom->dev))
+ if (dwc3_qcom_is_host(qcom) && wakeup)
dwc3_qcom_disable_interrupts(qcom);
for (i = 0; i < qcom->num_clocks; i++) {
@@ -458,7 +477,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
if (qcom->pm_suspended)
return IRQ_HANDLED;
- if (dwc->xhci)
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
@@ -757,13 +780,13 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
static int dwc3_qcom_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- int ret, i;
- bool ignore_pipe_clk;
- struct generic_pm_domain *genpd;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ struct resource *res, *parent_res = NULL;
+ int ret, i;
+ bool ignore_pipe_clk;
+ bool wakeup_source;
qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
if (!qcom)
@@ -772,8 +795,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- genpd = pd_to_genpd(qcom->dev->pm_domain);
-
if (has_acpi_companion(dev)) {
qcom->acpi_pdata = acpi_device_get_match_data(dev);
if (!qcom->acpi_pdata) {
@@ -881,16 +902,9 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret)
goto interconnect_exit;
- if (device_can_wakeup(&qcom->dwc3->dev)) {
- /*
- * Setting GENPD_FLAG_ALWAYS_ON flag takes care of keeping
- * genpd on in both runtime suspend and system suspend cases.
- */
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- device_init_wakeup(&pdev->dev, true);
- } else {
- genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
- }
+ wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
+ device_init_wakeup(&pdev->dev, wakeup_source);
+ device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
qcom->is_suspended = false;
pm_runtime_set_active(dev);
@@ -944,39 +958,45 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- int ret = 0;
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
- ret = dwc3_qcom_suspend(qcom);
- if (!ret)
- qcom->pm_suspended = true;
+ ret = dwc3_qcom_suspend(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = true;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
int ret;
- ret = dwc3_qcom_resume(qcom);
- if (!ret)
- qcom->pm_suspended = false;
+ ret = dwc3_qcom_resume(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = false;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_suspend(qcom);
+ return dwc3_qcom_suspend(qcom, true);
}
static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_resume(qcom);
+ return dwc3_qcom_resume(qcom, true);
}
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
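Taken together, these hunks thread an explicit wakeup decision through the helpers: system sleep derives it from device_may_wakeup(), runtime PM always passes true, and the wakeup interrupts are armed only while the controller is in host mode. A condensed sketch of the resulting plumbing:

/* Hedged summary sketch of the wakeup plumbing after this patch. */
static int pm_suspend_sketch(struct device *dev)
{
	struct dwc3_qcom *qcom = dev_get_drvdata(dev);

	/* system sleep: honour the userspace wakeup policy */
	return dwc3_qcom_suspend(qcom, device_may_wakeup(dev));
}

static int runtime_suspend_sketch(struct device *dev)
{
	struct dwc3_qcom *qcom = dev_get_drvdata(dev);

	/* runtime PM: remote wakeup must always be able to resume the bus */
	return dwc3_qcom_suspend(qcom, true);
}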
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index aeeec751c53c..eca945feeec3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2539,9 +2539,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
- if (dwc->pullups_connected == is_on)
- return 0;
-
dwc->softconnect = is_on;
/*
@@ -2566,6 +2563,11 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc);
} else {
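The reason the pullups_connected short-circuit moves below the runtime-PM acquisition (a pm_runtime_get_sync() call sits between the two hunks in the full function, not shown here): once the reference is taken, every exit path, including the no-op case, must drop it, or the controller is pinned active forever. A hedged sketch of the balanced pattern:

/* Hedged sketch: pair every successful pm_runtime get with a put, even
 * on the "nothing to change" early return.
 */
ret = pm_runtime_get_sync(dwc->dev);
if (ret < 0) {
	pm_runtime_put(dwc->dev);
	return 0;
}

if (dwc->pullups_connected == is_on) {	/* no-op, but a ref is held */
	pm_runtime_put(dwc->dev);
	return 0;
}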
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f56c30cf151e..a7154fe8206d 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,13 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-plat.h"
#include "core.h"
+static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
+ .quirks = XHCI_SKIP_PHY_INIT,
+};
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -92,6 +97,11 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
+ sizeof(dwc3_xhci_plat_priv));
+ if (ret)
+ goto err;
+
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
@@ -135,4 +145,5 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 1905a8d8e0c9..08726e4c68a5 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -291,6 +291,12 @@ static struct usb_endpoint_descriptor ss_ep_int_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor ss_ep_int_desc_comp = {
+ .bLength = sizeof(ss_ep_int_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .wBytesPerInterval = cpu_to_le16(6),
+};
+
/* Audio Streaming OUT Interface - Alt0 */
static struct usb_interface_descriptor std_as_out_if0_desc = {
.bLength = sizeof std_as_out_if0_desc,
@@ -604,7 +610,8 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
- (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc_comp,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
@@ -800,6 +807,7 @@ static void setup_headers(struct f_uac2_opts *opts,
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *ep_int_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
@@ -827,6 +835,7 @@ static void setup_headers(struct f_uac2_opts *opts,
epin_fback_desc = &ss_epin_fback_desc;
epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
+ ep_int_desc_comp = &ss_ep_int_desc_comp;
}
i = 0;
@@ -855,8 +864,11 @@ static void setup_headers(struct f_uac2_opts *opts,
if (EPOUT_EN(opts))
headers[i++] = USBDHDR(&io_out_ot_desc);
- if (FUOUT_EN(opts) || FUIN_EN(opts))
+ if (FUOUT_EN(opts) || FUIN_EN(opts)) {
headers[i++] = USBDHDR(ep_int_desc);
+ if (ep_int_desc_comp)
+ headers[i++] = USBDHDR(ep_int_desc_comp);
+ }
if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&std_as_out_if0_desc);
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 03035dbbe97b..208c6a92780a 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
+ /*
+ * Convert to Minutes-Seconds-Frames.
+ * Sector size is already set to 2048 bytes.
+ */
addr += 2*75; /* Lead-in occupies 2 seconds */
dest[3] = addr % 75; /* Frames */
addr /= 75;
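For reference, the arithmetic the new comment relies on: with 2048-byte sectors the logical block address is already expressed in frames, so the old `addr >>= 2` rescaling double-converted it; one second is 75 frames and the lead-in adds two seconds. A hedged sketch of the complete LBA-to-MSF split:

/* Hedged sketch: LBA -> Minutes-Seconds-Frames for 2048-byte sectors. */
u32 minutes, seconds, frames;

addr += 2 * 75;		/* lead-in occupies 2 seconds */
frames = addr % 75;	/* 75 frames per second */
addr /= 75;
seconds = addr % 60;	/* 60 seconds per minute */
minutes = addr / 60;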
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index cafcf260394c..c63c0c2cf649 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -736,7 +736,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
ret = gadget->ops->pullup(gadget, 0);
if (!ret) {
gadget->connected = 0;
- gadget->udc->driver->disconnect(gadget);
+ mutex_lock(&udc_lock);
+ if (gadget->udc->driver)
+ gadget->udc->driver->disconnect(gadget);
+ mutex_unlock(&udc_lock);
}
out:
@@ -1489,7 +1492,6 @@ static int gadget_bind_driver(struct device *dev)
usb_gadget_udc_set_speed(udc, driver->max_speed);
- mutex_lock(&udc_lock);
ret = driver->bind(udc->gadget, driver);
if (ret)
goto err_bind;
@@ -1499,7 +1501,6 @@ static int gadget_bind_driver(struct device *dev)
goto err_start;
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
- mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
@@ -1512,6 +1513,7 @@ static int gadget_bind_driver(struct device *dev)
dev_err(&udc->dev, "failed to start %s: %d\n",
driver->function, ret);
+ mutex_lock(&udc_lock);
udc->driver = NULL;
driver->is_bound = false;
mutex_unlock(&udc_lock);
@@ -1529,7 +1531,6 @@ static void gadget_unbind_driver(struct device *dev)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- mutex_lock(&udc_lock);
usb_gadget_disconnect(gadget);
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
@@ -1537,6 +1538,7 @@ static void gadget_unbind_driver(struct device *dev)
udc->driver->unbind(gadget);
usb_gadget_udc_stop(udc);
+ mutex_lock(&udc_lock);
driver->is_bound = false;
udc->driver = NULL;
mutex_unlock(&udc_lock);
@@ -1612,7 +1614,7 @@ static ssize_t soft_connect_store(struct device *dev,
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
ssize_t ret;
- mutex_lock(&udc_lock);
+ device_lock(&udc->gadget->dev);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
ret = -EOPNOTSUPP;
@@ -1633,7 +1635,7 @@ static ssize_t soft_connect_store(struct device *dev,
ret = n;
out:
- mutex_unlock(&udc_lock);
+ device_unlock(&udc->gadget->dev);
return ret;
}
static DEVICE_ATTR_WO(soft_connect);
@@ -1652,11 +1654,15 @@ static ssize_t function_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- struct usb_gadget_driver *drv = udc->driver;
+ struct usb_gadget_driver *drv;
+ int rc = 0;
- if (!drv || !drv->function)
- return 0;
- return scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_lock(&udc_lock);
+ drv = udc->driver;
+ if (drv && drv->function)
+ rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_unlock(&udc_lock);
+ return rc;
}
static DEVICE_ATTR_RO(function);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index feca826d3f6a..75c2b28b3379 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -203,6 +203,31 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
goto err1;
}
+ /*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller. If
+ * an access is performed to a region of memory above 1MB relative to
+ * the bank base, it is important that address bit 10 _NOT_ be
+ * asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+ * User's Guide" mentions that jumpers R51 and R52 control the
+ * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+ * SDRAM bank 1 on Neponset). The default configuration selects
+ * Assabet, so any address in bank 1 is necessarily invalid.
+ *
+ * As a workaround, use a bounce buffer in addressable memory
+ * as local_mem, relying on ZONE_DMA to provide an area that
+ * fits within the above constraints.
+ *
+ * SZ_64K is an estimate of how much memory this might need.
+ */
+ ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
+ if (ret)
+ goto err1;
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0fdc014c9401..4619d5e89d5b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-acquire the lock while calling ACPI
* method.
*/
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
@@ -1648,6 +1648,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status = bus_state->resuming_ports;
+ /*
+ * SS devices are only visible to the roothub after link training
+ * completes. Keep polling roothubs for a grace period after xHC start.
+ */
+ if (xhci->run_graceperiod) {
+ if (time_before(jiffies, xhci->run_graceperiod))
+ status = 1;
+ else
+ xhci->run_graceperiod = 0;
+ }
+
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 06a6b19acaae..579899eb24c1 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -425,7 +425,6 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- u32 extra_cs_count;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
@@ -461,18 +460,12 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (last_cs > 7)
return -ESCH_CS_OVERFLOW;
- if (sch_ep->ep_type == ISOC_IN_EP)
- extra_cs_count = (last_cs == 7) ? 1 : 2;
- else /* ep_type : INTR IN / INTR OUT */
- extra_cs_count = 1;
-
- cs_count += extra_cs_count;
if (cs_count > 7)
cs_count = 7; /* HW limit */
sch_ep->cs_count = cs_count;
- /* one for ss, the other for idle */
- sch_ep->num_budget_microframes = cs_count + 2;
+ /* ss, idle are ignored */
+ sch_ep->num_budget_microframes = cs_count;
/*
* if interval=1, maxp > 752, num_budget_microframes is larger
@@ -771,8 +764,8 @@ int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
if (ret)
return ret;
- if (ep->hcpriv)
- drop_ep_quirk(hcd, udev, ep);
+ /* no need to check @ep->hcpriv: xhci_endpoint_disable() sets it to NULL */
+ drop_ep_quirk(hcd, udev, ep);
return 0;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 044855818cb1..a8641b6536ee 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -398,12 +398,17 @@ static int xhci_plat_remove(struct platform_device *dev)
pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
- usb_remove_hcd(shared_hcd);
- xhci->shared_hcd = NULL;
+ if (shared_hcd) {
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
+ }
+
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(shared_hcd);
+
+ if (shared_hcd)
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index a5da02077297..61e93a3540a7 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -28,9 +28,9 @@
DECLARE_EVENT_CLASS(xhci_log_msg,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
- TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+ TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
TP_fast_assign(
- vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 65858f607437..38649284ff88 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -151,9 +151,11 @@ int xhci_start(struct xhci_hcd *xhci)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
+ }
return ret;
}
@@ -791,8 +793,6 @@ static void xhci_stop(struct usb_hcd *hcd)
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- unsigned long flags;
- int i;
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -808,21 +808,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
-
- /* Power off USB2 ports*/
- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
-
- /* Power off USB3 ports*/
- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
-
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
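The run_graceperiod handling spread across these xhci hunks is a one-shot deadline: xhci_start() stamps jiffies + 500 ms, and the hub status path keeps reporting a pending change until the stamp expires, so the roothub is polled while SuperSpeed links finish training. The pattern in isolation, hedged (gp is a hypothetical holder):

/* Hedged sketch of a one-shot jiffies grace period. */

/* at controller start: */
gp->deadline = jiffies + msecs_to_jiffies(500);

/* in the periodic status path: */
if (gp->deadline) {
	if (time_before(jiffies, gp->deadline))
		status = 1;		/* force another roothub poll */
	else
		gp->deadline = 0;	/* expire exactly once */
}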
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1960b47acfb2..7caa0db5e826 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1826,7 +1826,7 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
-
+ unsigned long run_graceperiod;
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -2196,8 +2196,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
- bool on, unsigned long *flags);
void xhci_hc_died(struct xhci_hcd *xhci);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index d1df153e7f5a..d63c63942af1 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -71,10 +71,7 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
{
int err;
- if (hub->reset_gpio) {
- gpiod_set_value_cansleep(hub->reset_gpio, 1);
- fsleep(hub->pdata->reset_us);
- }
+ gpiod_set_value_cansleep(hub->reset_gpio, 1);
err = regulator_disable(hub->vdd);
if (err) {
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index dfa0d5ce6012..fcb95fb639e0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -248,7 +248,7 @@ sisusbcon_init(struct vc_data *c, int init)
*/
kref_get(&sisusb->kref);
- if (!*c->vc_uni_pagedir_loc)
+ if (!*c->uni_pagedict_loc)
con_set_default_unimap(c);
mutex_unlock(&sisusb->lock);
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
index a09deae1146f..03d2a9bac27e 100644
--- a/drivers/usb/mtu3/mtu3_trace.h
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -18,18 +18,16 @@
#include "mtu3.h"
-#define MTU3_MSG_MAX 256
-
TRACE_EVENT(mtu3_log,
TP_PROTO(struct device *dev, struct va_format *vaf),
TP_ARGS(dev, vaf),
TP_STRUCT__entry(
__string(name, dev_name(dev))
- __dynamic_array(char, msg, MTU3_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
- vsnprintf(__get_str(msg), MTU3_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f906dfd360d3..6c8f7763e75e 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -86,7 +86,7 @@ config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
depends on ARCH_OMAP2PLUS || COMPILE_TEST
- depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
+ depends on NOP_USB_XCEIV!=m || USB_MUSB_HDRC=m
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index ec28b5716796..f246b14394c4 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(musb_log,
TP_ARGS(musb, vaf),
TP_STRUCT__entry(
__string(name, dev_name(musb->controller))
- __dynamic_array(char, msg, MUSB_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(musb->controller));
- vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2798fca71261..af01a462cc43 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -97,7 +97,10 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+
unsigned long quirks;
+ u8 version;
+
unsigned long break_end;
};
@@ -250,8 +253,12 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
+ *
+ * At least one device with version 0x27 appears to have this bit
+ * inverted.
*/
- val |= BIT(7);
+ if (priv->version > 0x27)
+ val |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
@@ -265,6 +272,9 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
* (stop bits, parity and word length). Version 0x30 and above use
* CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
*/
+ if (priv->version < 0x30)
+ return 0;
+
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
if (r)
@@ -308,7 +318,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r)
return r;
- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
+
+ priv->version = buffer[0];
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c374620a486f..a34957c4b64c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d5a3986dfee7..52d59be92034 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = {
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ /* Omron devices */
+ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e92c165c86b..31c8ccabbbb7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -662,6 +662,12 @@
#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
+ * Omron Corporation (https://www.omron.com)
+ */
+#define OMRON_VID 0x0590
+#define OMRON_CS1W_CIF31_PID 0x00b2
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index de59fa919540..697683e3fbff 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -253,8 +253,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
+#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -438,6 +440,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
+#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
+#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -573,6 +577,10 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
+/* OPPO products */
+#define OPPO_VENDOR_ID 0x22d9
+#define OPPO_PRODUCT_R11 0x276c
+
/* Device flags */
@@ -1131,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+ .driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1138,6 +1148,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1149,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
@@ -1993,8 +2009,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2155,6 +2175,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
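These modems are composite devices, so the table matches per interface on (class, subclass, protocol) rather than claiming the whole VID:PID: the vendor-specific serial functions bind here while, say, an RMNET network interface is left alone or masked via RSVD(). A hedged sketch using the RM520N values from the hunk above:

/* Hedged sketch: bind only interfaces whose descriptor triple matches;
 * other functions on the same composite device keep their own drivers.
 */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N,
				0xff, 0, 0x40) },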
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1a05e3dcfec8..4993227ab293 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2294,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */
+UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
+ "NXP Semiconductors",
+ "PN7462AU",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Supplied with some Castlewood ORB removable drives */
UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
"Double-H Technology",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 4051c8cd0cd8..23ab3b048d9b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -62,6 +62,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+/* Reported-by: Tom Hu <huxiaoying@kylinos.cn> */
+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
+ "ASUS",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 5defdfead653..831e7049977d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_ANX7411
tristate "Analogix ANX7411 Type-C DRP Port controller driver"
depends on I2C
depends on USB_ROLE_SWITCH
+ depends on POWER_SUPPLY
help
Say Y or M here if your system has Analogix ANX7411 Type-C DRP Port
controller driver.
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index c1d8c23baa39..de66a2949e33 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -99,8 +99,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
break;
default:
break;
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebc29ec20e3f..bd5e5dd70431 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2346,6 +2346,7 @@ static void __exit typec_exit(void)
ida_destroy(&typec_index_ida);
bus_unregister(&typec_bus);
class_unregister(&typec_mux_class);
+ class_unregister(&retimer_class);
}
module_exit(typec_exit);
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 47b733f78fb0..a8e273fe204a 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -571,9 +571,11 @@ err_unregister_switch:
static int is_memory(struct acpi_resource *res, void *data)
{
- struct resource r;
+ struct resource_win win = {};
+ struct resource *r = &win.res;
- return !acpi_dev_resource_memory(res, &r);
+ return !(acpi_dev_resource_memory(res, r) ||
+ acpi_dev_resource_address_space(res, &win));
}
/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
@@ -583,6 +585,9 @@ static const struct acpi_device_id iom_acpi_ids[] = {
/* AlderLake */
{ "INTC1079", 0x160, },
+
+ /* Meteor Lake */
+ { "INTC107A", 0x160, },
{}
};
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index ea5a917c51b1..904c7b4ce2f0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -6320,6 +6320,13 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
struct tcpm_port *port = power_supply_get_drvdata(psy);
int ret;
+ /*
+ * All the properties below are related to USB PD. The check needs to be
+ * property specific when a non-pd related property is added.
+ */
+ if (!port->pd_supported)
+ return -EOPNOTSUPP;
+
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
ret = tcpm_psy_set_online(port, val);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1aea46493b85..7f2624f42724 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1200,32 +1200,6 @@ out_unlock:
return ret;
}
-static void ucsi_unregister_connectors(struct ucsi *ucsi)
-{
- struct ucsi_connector *con;
- int i;
-
- if (!ucsi->connector)
- return;
-
- for (i = 0; i < ucsi->cap.num_connectors; i++) {
- con = &ucsi->connector[i];
-
- if (!con->wq)
- break;
-
- cancel_work_sync(&con->work);
- ucsi_unregister_partner(con);
- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(con);
- destroy_workqueue(con->wq);
- typec_unregister_port(con->port);
- }
-
- kfree(ucsi->connector);
- ucsi->connector = NULL;
-}
-
/**
* ucsi_init - Initialize UCSI interface
* @ucsi: UCSI to be initialized
@@ -1234,6 +1208,7 @@ static void ucsi_unregister_connectors(struct ucsi *ucsi)
*/
static int ucsi_init(struct ucsi *ucsi)
{
+ struct ucsi_connector *con;
u64 command;
int ret;
int i;
@@ -1264,7 +1239,7 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors,
+ ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
ret = -ENOMEM;
@@ -1288,7 +1263,15 @@ static int ucsi_init(struct ucsi *ucsi)
return 0;
err_unregister:
- ucsi_unregister_connectors(ucsi);
+ for (con = ucsi->connector; con->port; con++) {
+ ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
+ if (con->wq)
+ destroy_workqueue(con->wq);
+ typec_unregister_port(con->port);
+ con->port = NULL;
+ }
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1402,6 +1385,7 @@ EXPORT_SYMBOL_GPL(ucsi_register);
void ucsi_unregister(struct ucsi *ucsi)
{
u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
+ int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_delayed_work_sync(&ucsi->work);
@@ -1409,7 +1393,18 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
- ucsi_unregister_connectors(ucsi);
+ for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ cancel_work_sync(&ucsi->connector[i].work);
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+ if (ucsi->connector[i].wq)
+ destroy_workqueue(ucsi->connector[i].wq);
+ typec_unregister_port(ucsi->connector[i].port);
+ }
+
+ kfree(ucsi->connector);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
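The kcalloc(num_connectors + 1, ...) change earlier in this diff is what makes the new error-path loop work: the extra zeroed entry is a sentinel, so a partially initialized array can be torn down with `for (con = ...; con->port; con++)` and no separate count. A minimal sketch of the pattern:

/* Hedged sketch: sentinel-terminated object array. */
con = kcalloc(n + 1, sizeof(*con), GFP_KERNEL);	/* slot [n] stays zero */
if (!con)
	return -ENOMEM;

for (c = con; c->port; c++)	/* stops at the first never-filled slot */
	unregister_one(c);	/* hypothetical teardown helper */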
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..75a703b803a2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -29,7 +29,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
vp_iowrite16(vector, &cfg->msix_config);
return vp_ioread16(&cfg->msix_config);
@@ -128,6 +127,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cap_addr(hw, &cap);
+ hw->cap_dev_config_size = le32_to_cpu(cap.length);
IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -233,15 +233,23 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
struct ifcvf_adapter *adapter;
+ u32 net_config_size = sizeof(struct virtio_net_config);
+ u32 blk_config_size = sizeof(struct virtio_blk_config);
+ u32 cap_size = hw->cap_dev_config_size;
u32 config_size;
adapter = vf_to_adapter(hw);
+ /* If the on-board device config space size is greater than
+ * the size of struct virtio_net/blk_config, only the size the
+ * spec defines is returned; this is very unlikely, but handled
+ * as defensive programming.
+ */
switch (hw->dev_type) {
case VIRTIO_ID_NET:
- config_size = sizeof(struct virtio_net_config);
+ config_size = min(cap_size, net_config_size);
break;
case VIRTIO_ID_BLOCK:
- config_size = sizeof(struct virtio_blk_config);
+ config_size = min(cap_size, blk_config_size);
break;
default:
config_size = 0;
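The min() clamp implements the defensive rule from the comment: never report more config space than the spec-defined structure, even if the device's VIRTIO_PCI_CAP_DEVICE_CFG capability advertises a larger window. Sketched in isolation:

/* Hedged sketch: clamp the advertised config window so later copies can
 * never run past struct virtio_net_config / virtio_blk_config.
 */
u32 spec_size = sizeof(struct virtio_net_config);
u32 config_size = min(hw->cap_dev_config_size, spec_size);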
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..f5563f665cc6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -87,6 +87,8 @@ struct ifcvf_hw {
int config_irq;
int vqs_reused_irq;
u16 nr_vring;
+ /* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ u32 cap_dev_config_size;
};
struct ifcvf_adapter {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 0a5670729412..f9c0044c6442 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -685,7 +685,7 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
}
/*
- * IFCVF currently does't have on-chip IOMMU, so not
+ * IFCVF currently doesn't have on-chip IOMMU, so not
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
@@ -752,59 +752,36 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
+ struct vdpa_device *vdpa_dev;
struct pci_dev *pdev;
struct ifcvf_hw *vf;
- struct device *dev;
- int ret, i;
+ int ret;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
- if (ifcvf_mgmt_dev->adapter)
+ if (!ifcvf_mgmt_dev->adapter)
return -EOPNOTSUPP;
- pdev = ifcvf_mgmt_dev->pdev;
- dev = &pdev->dev;
- adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, 1, 1, name, false);
- if (IS_ERR(adapter)) {
- IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return PTR_ERR(adapter);
- }
-
- ifcvf_mgmt_dev->adapter = adapter;
-
+ adapter = ifcvf_mgmt_dev->adapter;
vf = &adapter->vf;
- vf->dev_type = get_dev_type(pdev);
- vf->base = pcim_iomap_table(pdev);
+ pdev = adapter->pdev;
+ vdpa_dev = &adapter->vdpa;
- adapter->pdev = pdev;
- adapter->vdpa.dma_dev = &pdev->dev;
-
- ret = ifcvf_init_hw(vf, pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- goto err;
- }
-
- for (i = 0; i < vf->nr_vring; i++)
- vf->vring[i].irq = -EINVAL;
-
- vf->hw_features = ifcvf_get_hw_features(vf);
- vf->config_size = ifcvf_get_config_size(vf);
+ if (name)
+ ret = dev_set_name(&vdpa_dev->dev, "%s", name);
+ else
+ ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
- adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
+ put_device(&adapter->vdpa.dev);
IFCVF_ERR(pdev, "Failed to register to vDPA bus");
- goto err;
+ return ret;
}
return 0;
-
-err:
- put_device(&adapter->vdpa.dev);
- return ret;
}
+
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
@@ -823,61 +800,94 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct device *dev = &pdev->dev;
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
u32 dev_type;
- int ret;
-
- ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
- if (!ifcvf_mgmt_dev) {
- IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
- return -ENOMEM;
- }
-
- dev_type = get_dev_type(pdev);
- switch (dev_type) {
- case VIRTIO_ID_NET:
- ifcvf_mgmt_dev->mdev.id_table = id_table_net;
- break;
- case VIRTIO_ID_BLOCK:
- ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
- break;
- default:
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
- ret = -EOPNOTSUPP;
- goto err;
- }
-
- ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
- ifcvf_mgmt_dev->mdev.device = dev;
- ifcvf_mgmt_dev->pdev = pdev;
+ int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to enable device\n");
- goto err;
+ return ret;
}
-
ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
IFCVF_DRIVER_NAME);
if (ret) {
IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- goto err;
+ return ret;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
IFCVF_ERR(pdev, "No usable DMA configuration\n");
- goto err;
+ return ret;
}
ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
if (ret) {
IFCVF_ERR(pdev,
"Failed for adding devres for freeing irq vectors\n");
- goto err;
+ return ret;
}
pci_set_master(pdev);
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+ if (IS_ERR(adapter)) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return PTR_ERR(adapter);
+ }
+
+ vf = &adapter->vf;
+ vf->dev_type = get_dev_type(pdev);
+ vf->base = pcim_iomap_table(pdev);
+
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ return ret;
+ }
+
+ for (i = 0; i < vf->nr_vring; i++)
+ vf->vring[i].irq = -EINVAL;
+
+ vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->adapter = adapter;
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+
ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
if (ret) {
IFCVF_ERR(pdev,
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 44104093163b..6af9fdbb86b7 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -70,6 +70,16 @@ struct mlx5_vdpa_wq_ent {
struct mlx5_vdpa_dev *mvdev;
};
+enum {
+ MLX5_VDPA_DATAVQ_GROUP,
+ MLX5_VDPA_CVQ_GROUP,
+ MLX5_VDPA_NUMVQ_GROUPS
+};
+
+enum {
+ MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -85,6 +95,7 @@ struct mlx5_vdpa_dev {
struct mlx5_vdpa_mr mr;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e85c1d71f4ed..ed100a35e596 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -164,6 +164,7 @@ struct mlx5_vdpa_net {
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
+ bool nb_registered;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
@@ -895,6 +896,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
if (err)
goto err_cmd;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
kfree(in);
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -922,6 +924,7 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
return;
}
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
umems_destroy(ndev, mvq);
}
@@ -1121,6 +1124,20 @@ err_cmd:
return err;
}
+static bool is_valid_state_change(int oldstate, int newstate)
+{
+ switch (oldstate) {
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
+ default:
+ return false;
+ }
+}
+
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
@@ -1130,6 +1147,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
void *in;
int err;
+ if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
+ return 0;
+
+ if (!is_valid_state_change(mvq->fw_state, state))
+ return -EINVAL;
+
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1440,7 +1463,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
- memset(dmac_c, 0xff, ETH_ALEN);
+ eth_broadcast_addr(dmac_c);
ether_addr_copy(dmac_v, mac);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
if (tagged) {
@@ -1992,6 +2015,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ int err;
if (!mvdev->actual_features)
return;
@@ -2005,8 +2029,16 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
}
mvq = &ndev->vqs[idx];
- if (!ready)
+ if (!ready) {
suspend_vq(ndev, mvq);
+ } else {
+ err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
+ ready = false;
+ }
+ }
+
mvq->ready = ready;
}
@@ -2095,9 +2127,14 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
return PAGE_SIZE;
}
-static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
{
- return 0;
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return MLX5_VDPA_CVQ_GROUP;
+
+ return MLX5_VDPA_DATAVQ_GROUP;
}
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
@@ -2511,6 +2548,15 @@ err_clear:
up_write(&ndev->reslock);
}
+static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
+{
+ int i;
+
+ /* by default, all groups are mapped to ASID 0 */
+ for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
+ mvdev->group2asid[i] = 0;
+}
+
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2529,7 +2575,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
+ init_group_to_asid_map(mvdev);
++mvdev->generation;
+
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
@@ -2567,26 +2615,63 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
- struct vhost_iotlb *iotlb)
+static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ u64 start = 0ULL, last = 0ULL - 1;
+ struct vhost_iotlb_map *map;
+ int err = 0;
+
+ spin_lock(&mvdev->cvq.iommu_lock);
+ vhost_iotlb_reset(mvdev->cvq.iotlb);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+ map->last, map->addr, map->perm);
+ if (err)
+ goto out;
+ }
+
+out:
+ spin_unlock(&mvdev->cvq.iommu_lock);
+ return err;
+}
+
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
- down_write(&ndev->reslock);
-
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
- goto err;
+ return err;
}
if (change_map)
err = mlx5_vdpa_change_map(mvdev, iotlb);
-err:
+ return err;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err = -EINVAL;
+
+ down_write(&ndev->reslock);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+ err = set_map_data(mvdev, iotlb);
+ if (err)
+ goto out;
+ }
+
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+ err = set_map_control(mvdev, iotlb);
+
+out:
up_write(&ndev->reslock);
return err;
}
@@ -2733,6 +2818,49 @@ out_err:
return err;
}
+static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_control_vq *cvq;
+
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return;
+
+ cvq = &mvdev->cvq;
+ cvq->ready = false;
+}
+
+static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ down_write(&ndev->reslock);
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ flush_workqueue(ndev->mvdev.wq);
+ for (i = 0; i < ndev->cur_num_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ suspend_vq(ndev, mvq);
+ }
+ mlx5_vdpa_cvq_suspend(mvdev);
+ up_write(&ndev->reslock);
+ return 0;
+}
+
+static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
+ unsigned int asid)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (group >= MLX5_VDPA_NUMVQ_GROUPS)
+ return -EINVAL;
+
+ mvdev->group2asid[group] = asid;
+ return 0;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2762,7 +2890,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
.set_map = mlx5_vdpa_set_map,
+ .set_group_asid = mlx5_set_group_asid,
.free = mlx5_vdpa_free,
+ .suspend = mlx5_vdpa_suspend,
};
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -2828,6 +2958,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
mvq->index = i;
mvq->ndev = ndev;
mvq->fwqp.fw = true;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
}
for (; i < ndev->mvdev.max_vqs; i++) {
mvq = &ndev->vqs[i];
@@ -2902,13 +3033,21 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ down_read(&ndev->reslock);
+ if (!ndev->nb_registered) {
+ up_read(&ndev->reslock);
+ return NOTIFY_DONE;
+ }
wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
- if (!wqent)
+ if (!wqent) {
+ up_read(&ndev->reslock);
return NOTIFY_DONE;
+ }
wqent->mvdev = &ndev->mvdev;
INIT_WORK(&wqent->work, update_carrier);
queue_work(ndev->mvdev.wq, &wqent->work);
+ up_read(&ndev->reslock);
ret = NOTIFY_OK;
break;
default:
@@ -2982,7 +3121,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- 1, 1, name, false);
+ MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -3062,6 +3201,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
+ ndev->nb_registered = true;
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
@@ -3093,7 +3233,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ if (ndev->nb_registered) {
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ }
wq = mvdev->wq;
mvdev->wq = NULL;
destroy_workqueue(wq);
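
The mlx5 changes above split the virtqueues into a data VQ group and a CVQ
group and route set_map() through the group-to-ASID table. As a rough
illustration, and not part of this patch: a vhost-vdpa client can bind the
control VQ's group to its own address space so CVQ mappings are updated
independently of the data path. A minimal sketch, assuming the
VHOST_VDPA_GET_VRING_GROUP and VHOST_VDPA_SET_GROUP_ASID ioctls merged in
the same cycle; isolate_cvq() is a hypothetical helper with error handling
trimmed.

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int isolate_cvq(int vhost_fd, unsigned int cvq_index)
{
	struct vhost_vring_state s = { .index = cvq_index };

	/* ask which group the control VQ belongs to; returned in s.num */
	if (ioctl(vhost_fd, VHOST_VDPA_GET_VRING_GROUP, &s) < 0)
		return -1;

	s.index = s.num;	/* the group id (MLX5_VDPA_CVQ_GROUP here) */
	s.num = 1;		/* bind that group to address space 1 */
	return ioctl(vhost_fd, VHOST_VDPA_SET_GROUP_ASID, &s);
}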
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index ebf2f363fbe7..c06c02704461 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -824,11 +824,11 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
config.mac))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.status);
+ val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.mtu);
+ val_u16 = __virtio16_to_cpu(true, config.mtu);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
@@ -846,17 +846,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
- u8 status;
int err;
down_read(&vdev->cf_lock);
- status = vdev->config->get_status(vdev);
- if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
- NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
- err = -EAGAIN;
- goto out;
- }
-
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
if (!hdr) {
@@ -913,7 +905,7 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
- max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
return -EMSGSIZE;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 0f2865899647..225b7f5d8be3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
- "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
+ "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
@@ -107,6 +107,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
for (i = 0; i < vdpasim->dev_attr.nas; i++)
vhost_iotlb_reset(&vdpasim->iommu[i]);
+ vdpasim->running = true;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -291,7 +292,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
goto err_iommu;
for (i = 0; i < vdpasim->dev_attr.nas; i++)
- vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+ vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
@@ -505,6 +506,17 @@ static int vdpasim_reset(struct vdpa_device *vdpa)
return 0;
}
+static int vdpasim_suspend(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->running = false;
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -694,6 +706,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -726,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
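
For context, not part of this patch: the new .suspend callback is reached
from userspace through the vhost-vdpa suspend ioctl added in the same
series; the simulator then parks its workers until the device is reset.
A minimal sketch, assuming VHOST_VDPA_SUSPEND; suspend_device() is a
hypothetical helper.

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int suspend_device(int vhost_vdpa_fd)
{
	/* no argument: the whole device stops processing virtqueues */
	return ioctl(vhost_vdpa_fd, VHOST_VDPA_SUSPEND);
}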
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 622782e92239..061986f30911 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -66,6 +66,7 @@ struct vdpasim {
u32 generation;
u64 features;
u32 groups;
+ bool running;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 42d401d43911..c8bfea3b7db2 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -25,31 +25,49 @@
#define DRV_LICENSE "GPL v2"
#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_BLK_F_FLUSH) | \
(1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
(1ULL << VIRTIO_BLK_F_SEG_MAX) | \
(1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
(1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
- (1ULL << VIRTIO_BLK_F_MQ))
+ (1ULL << VIRTIO_BLK_F_MQ) | \
+ (1ULL << VIRTIO_BLK_F_DISCARD) | \
+ (1ULL << VIRTIO_BLK_F_WRITE_ZEROES))
#define VDPASIM_BLK_CAPACITY 0x40000
#define VDPASIM_BLK_SIZE_MAX 0x1000
#define VDPASIM_BLK_SEG_MAX 32
+#define VDPASIM_BLK_DWZ_MAX_SECTORS UINT_MAX
+
+/* 1 virtqueue, 1 address space, 1 virtqueue group */
#define VDPASIM_BLK_VQ_NUM 1
+#define VDPASIM_BLK_AS_NUM 1
+#define VDPASIM_BLK_GROUP_NUM 1
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
-static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
+static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
+ u64 num_sectors, u64 max_sectors)
{
- u64 range_sectors = range_size >> SECTOR_SHIFT;
-
- if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
- return false;
+ if (start_sector > VDPASIM_BLK_CAPACITY) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "starting sector exceeds the capacity - start: 0x%llx capacity: 0x%x\n",
+ start_sector, VDPASIM_BLK_CAPACITY);
+ }
- if (start_sector > VDPASIM_BLK_CAPACITY)
+ if (num_sectors > max_sectors) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "number of sectors exceeds the max allowed in a request - num: 0x%llx max: 0x%llx\n",
+ num_sectors, max_sectors);
return false;
+ }
- if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
+ if (num_sectors > VDPASIM_BLK_CAPACITY - start_sector) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "request exceeds the capacity - start: 0x%llx num: 0x%llx capacity: 0x%x\n",
+ start_sector, num_sectors, VDPASIM_BLK_CAPACITY);
return false;
+ }
return true;
}
@@ -63,6 +81,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
{
size_t pushed = 0, to_pull, to_push;
struct virtio_blk_outhdr hdr;
+ bool handled = false;
ssize_t bytes;
loff_t offset;
u64 sector;
@@ -76,14 +95,14 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
return false;
if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
- dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
+ dev_dbg(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
vq->out_iov.used, vq->in_iov.used);
- return false;
+ goto err;
}
if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
- dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request in header too short\n");
+ goto err;
}
/* The last byte is the status and we checked if the last iov has
@@ -96,8 +115,8 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
sizeof(hdr));
if (bytes != sizeof(hdr)) {
- dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request out header too short\n");
+ goto err;
}
to_pull -= bytes;
@@ -107,12 +126,20 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
offset = sector << SECTOR_SHIFT;
status = VIRTIO_BLK_S_OK;
+ if (type != VIRTIO_BLK_T_IN && type != VIRTIO_BLK_T_OUT &&
+ sector != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "sector must be 0 for %u request - sector: 0x%llx\n",
+ type, sector);
+ status = VIRTIO_BLK_S_IOERR;
+ goto err_status;
+ }
+
switch (type) {
case VIRTIO_BLK_T_IN:
- if (!vdpasim_blk_check_range(sector, to_push)) {
- dev_err(&vdpasim->vdpa.dev,
- "reading over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_push);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_push >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -121,7 +148,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_push);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_push);
status = VIRTIO_BLK_S_IOERR;
@@ -132,10 +159,9 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
break;
case VIRTIO_BLK_T_OUT:
- if (!vdpasim_blk_check_range(sector, to_pull)) {
- dev_err(&vdpasim->vdpa.dev,
- "writing over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_pull);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_pull >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -144,7 +170,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_pull);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_pull);
status = VIRTIO_BLK_S_IOERR;
@@ -157,7 +183,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim_blk_id,
VIRTIO_BLK_ID_BYTES);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd\n", bytes);
status = VIRTIO_BLK_S_IOERR;
break;
@@ -166,13 +192,76 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
pushed += bytes;
break;
+ case VIRTIO_BLK_T_FLUSH:
+ /* nothing to do */
+ break;
+
+ case VIRTIO_BLK_T_DISCARD:
+ case VIRTIO_BLK_T_WRITE_ZEROES: {
+ struct virtio_blk_discard_write_zeroes range;
+ u32 num_sectors, flags;
+
+ if (to_pull != sizeof(range)) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard/write_zeroes header len: 0x%zx [expected: 0x%zx]\n",
+ to_pull, sizeof(range));
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,
+ to_pull);
+ if (bytes < 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ sector = le64_to_cpu(range.sector);
+ offset = sector << SECTOR_SHIFT;
+ num_sectors = le32_to_cpu(range.num_sectors);
+ flags = le32_to_cpu(range.flags);
+
+ if (type == VIRTIO_BLK_T_DISCARD && flags != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES &&
+ flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "write_zeroes unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors,
+ VDPASIM_BLK_DWZ_MAX_SECTORS)) {
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ memset(vdpasim->buffer + offset, 0,
+ num_sectors << SECTOR_SHIFT);
+ }
+
+ break;
+ }
default:
- dev_warn(&vdpasim->vdpa.dev,
- "Unsupported request type %d\n", type);
+ dev_dbg(&vdpasim->vdpa.dev,
+ "Unsupported request type %d\n", type);
status = VIRTIO_BLK_S_IOERR;
break;
}
+err_status:
/* If some operations fail, we need to skip the remaining bytes
* to put the status in the last byte
*/
@@ -182,21 +271,25 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
/* Last byte is the status */
bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
if (bytes != 1)
- return false;
+ goto err;
pushed += bytes;
/* Make sure data is written before advancing index */
smp_wmb();
+ handled = true;
+
+err:
vringh_complete_iotlb(&vq->vring, vq->head, pushed);
- return true;
+ return handled;
}
static void vdpasim_blk_work(struct work_struct *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ bool reschedule = false;
int i;
spin_lock(&vdpasim->lock);
@@ -204,8 +297,12 @@ static void vdpasim_blk_work(struct work_struct *work)
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
+ if (!vdpasim->running)
+ goto out;
+
for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+ int reqs = 0;
if (!vq->ready)
continue;
@@ -218,10 +315,18 @@ static void vdpasim_blk_work(struct work_struct *work)
if (vringh_need_notify_iotlb(&vq->vring) > 0)
vringh_notify(&vq->vring);
local_bh_enable();
+
+ if (++reqs > 4) {
+ reschedule = true;
+ break;
+ }
}
}
out:
spin_unlock(&vdpasim->lock);
+
+ if (reschedule)
+ schedule_work(&vdpasim->work);
}
static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
@@ -237,6 +342,17 @@ static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ /* VIRTIO_BLK_F_DISCARD */
+ blk_config->discard_sector_alignment =
+ cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ blk_config->max_discard_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_discard_seg = cpu_to_vdpasim32(vdpasim, 1);
+ /* VIRTIO_BLK_F_WRITE_ZEROES */
+ blk_config->max_write_zeroes_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_write_zeroes_seg = cpu_to_vdpasim32(vdpasim, 1);
}
static void vdpasim_blk_mgmtdev_release(struct device *dev)
@@ -260,6 +376,8 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.id = VIRTIO_ID_BLOCK;
dev_attr.supported_features = VDPASIM_BLK_FEATURES;
dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
+ dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM;
+ dev_attr.nas = VDPASIM_BLK_AS_NUM;
dev_attr.config_size = sizeof(struct virtio_blk_config);
dev_attr.get_config = vdpasim_blk_get_config;
dev_attr.work_fn = vdpasim_blk_work;
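
For reference on the request format validated above (a sketch, not part of
the patch): a discard or write-zeroes request carries one
virtio_blk_discard_write_zeroes segment after the request header, with all
fields little-endian. fill_wz() below is a hypothetical guest-side helper.

#include <linux/virtio_blk.h>

static void fill_wz(struct virtio_blk_discard_write_zeroes *seg,
		    u64 sector, u32 num_sectors, bool unmap)
{
	seg->sector = cpu_to_le64(sector);
	seg->num_sectors = cpu_to_le32(num_sectors);
	/* only write-zeroes may set UNMAP; discard must keep flags == 0 */
	seg->flags = cpu_to_le32(unmap ? VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP : 0);
}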
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 5125976a4df8..886449e88502 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -154,6 +154,9 @@ static void vdpasim_net_work(struct work_struct *work)
spin_lock(&vdpasim->lock);
+ if (!vdpasim->running)
+ goto out;
+
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 6daa3978d290..e682bc7ee6c9 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -138,18 +138,17 @@ static void do_bounce(phys_addr_t orig, void *addr, size_t size,
{
unsigned long pfn = PFN_DOWN(orig);
unsigned int offset = offset_in_page(orig);
- char *buffer;
+ struct page *page;
unsigned int sz = 0;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
- buffer = kmap_atomic(pfn_to_page(pfn));
+ page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE)
- memcpy(addr, buffer + offset, sz);
+ memcpy_from_page(addr, page, offset, sz);
else
- memcpy(buffer + offset, addr, sz);
- kunmap_atomic(buffer);
+ memcpy_to_page(page, offset, addr, sz);
size -= sz;
pfn++;
@@ -179,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR))
return;
- addr = page_address(map->bounce_page) + offset;
- do_bounce(map->orig_phys + offset, addr, sz, dir);
+ addr = kmap_local_page(map->bounce_page);
+ do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+ kunmap_local(addr);
size -= sz;
iova += sz;
}
@@ -213,21 +213,21 @@ vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
struct vduse_bounce_map *map;
struct page *page = NULL;
- spin_lock(&domain->iotlb_lock);
+ read_lock(&domain->bounce_lock);
map = &domain->bounce_maps[iova >> PAGE_SHIFT];
- if (!map->bounce_page)
+ if (domain->user_bounce_pages || !map->bounce_page)
goto out;
page = map->bounce_page;
get_page(page);
out:
- spin_unlock(&domain->iotlb_lock);
+ read_unlock(&domain->bounce_lock);
return page;
}
static void
-vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
@@ -247,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
}
}
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count)
+{
+ struct vduse_bounce_map *map;
+ int i, ret;
+
+ /* We don't currently support partial mapping */
+ if (count != (domain->bounce_size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ write_lock(&domain->bounce_lock);
+ ret = -EEXIST;
+ if (domain->user_bounce_pages)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ map = &domain->bounce_maps[i];
+ if (map->bounce_page) {
+ /* Copy kernel page to user page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR)
+ memcpy_to_page(pages[i], 0,
+ page_address(map->bounce_page),
+ PAGE_SIZE);
+ __free_page(map->bounce_page);
+ }
+ map->bounce_page = pages[i];
+ get_page(pages[i]);
+ }
+ domain->user_bounce_pages = true;
+ ret = 0;
+out:
+ write_unlock(&domain->bounce_lock);
+
+ return ret;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long i, count;
+
+ write_lock(&domain->bounce_lock);
+ if (!domain->user_bounce_pages)
+ goto out;
+
+ count = domain->bounce_size >> PAGE_SHIFT;
+ for (i = 0; i < count; i++) {
+ struct page *page = NULL;
+
+ map = &domain->bounce_maps[i];
+ if (WARN_ON(!map->bounce_page))
+ continue;
+
+ /* Copy user page to kernel page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+ memcpy_from_page(page_address(page),
+ map->bounce_page, 0, PAGE_SIZE);
+ }
+ put_page(map->bounce_page);
+ map->bounce_page = page;
+ }
+ domain->user_bounce_pages = false;
+out:
+ write_unlock(&domain->bounce_lock);
+}
+
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
if (!domain->bounce_map)
@@ -322,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_init_bounce_map(domain))
goto err;
+ read_lock(&domain->bounce_lock);
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
- goto err;
+ goto err_unlock;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+ read_unlock(&domain->bounce_lock);
+
return iova;
+err_unlock:
+ read_unlock(&domain->bounce_lock);
err:
vduse_domain_free_iova(iovad, iova, size);
return DMA_MAPPING_ERROR;
@@ -340,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
{
struct iova_domain *iovad = &domain->stream_iovad;
+ read_lock(&domain->bounce_lock);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ read_unlock(&domain->bounce_lock);
vduse_domain_free_iova(iovad, dma_addr, size);
}
@@ -451,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
spin_lock(&domain->iotlb_lock);
vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
- vduse_domain_free_bounce_pages(domain);
+ vduse_domain_remove_user_bounce_pages(domain);
+ vduse_domain_free_kernel_bounce_pages(domain);
spin_unlock(&domain->iotlb_lock);
put_iova_domain(&domain->stream_iovad);
put_iova_domain(&domain->consistent_iovad);
@@ -511,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
goto err_file;
domain->file = file;
+ rwlock_init(&domain->bounce_lock);
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN);
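
A note on the conversion above: kmap_atomic() is deprecated in favor of
short-lived local mappings, and memcpy_from_page()/memcpy_to_page() wrap
exactly that pattern. Roughly, as a sketch of the helper's effect rather
than its exact definition:

#include <linux/highmem.h>

static void memcpy_from_page_sketch(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);	/* nestable and preemptible */

	memcpy(to, from + offset, len);
	kunmap_local(from);
}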
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 2722d9b8e21a..4e0e50e7ac15 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -14,6 +14,7 @@
#include <linux/iova.h>
#include <linux/dma-mapping.h>
#include <linux/vhost_iotlb.h>
+#include <linux/rwlock.h>
#define IOVA_START_PFN 1
@@ -34,6 +35,8 @@ struct vduse_iova_domain {
struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
struct file *file;
+ bool user_bounce_pages;
+ rwlock_t bounce_lock;
};
int vduse_domain_set_map(struct vduse_iova_domain *domain,
@@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count);
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+
void vduse_domain_destroy(struct vduse_iova_domain *domain);
struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 3bc27de58f46..41c0b29739f1 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -21,6 +21,8 @@
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/virtio_config.h>
@@ -64,6 +66,13 @@ struct vduse_vdpa {
struct vduse_dev *dev;
};
+struct vduse_umem {
+ unsigned long iova;
+ unsigned long npages;
+ struct page **pages;
+ struct mm_struct *mm;
+};
+
struct vduse_dev {
struct vduse_vdpa *vdev;
struct device *dev;
@@ -95,6 +104,8 @@ struct vduse_dev {
u8 status;
u32 vq_num;
u32 vq_align;
+ struct vduse_umem *umem;
+ struct mutex mem_lock;
};
struct vduse_dev_msg {
@@ -917,6 +928,102 @@ unlock:
return ret;
}
+static int vduse_dev_dereg_umem(struct vduse_dev *dev,
+ u64 iova, u64 size)
+{
+ int ret;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -ENOENT;
+ if (!dev->umem)
+ goto unlock;
+
+ ret = -EINVAL;
+ if (dev->umem->iova != iova || size != dev->domain->bounce_size)
+ goto unlock;
+
+ vduse_domain_remove_user_bounce_pages(dev->domain);
+ unpin_user_pages_dirty_lock(dev->umem->pages,
+ dev->umem->npages, true);
+ atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
+ mmdrop(dev->umem->mm);
+ vfree(dev->umem->pages);
+ kfree(dev->umem);
+ dev->umem = NULL;
+ ret = 0;
+unlock:
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
+static int vduse_dev_reg_umem(struct vduse_dev *dev,
+ u64 iova, u64 uaddr, u64 size)
+{
+ struct page **page_list = NULL;
+ struct vduse_umem *umem = NULL;
+ long pinned = 0;
+ unsigned long npages, lock_limit;
+ int ret;
+
+ if (!dev->domain->bounce_map ||
+ size != dev->domain->bounce_size ||
+ iova != 0 || uaddr & ~PAGE_MASK)
+ return -EINVAL;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -EEXIST;
+ if (dev->umem)
+ goto unlock;
+
+ ret = -ENOMEM;
+ npages = size >> PAGE_SHIFT;
+ page_list = __vmalloc(array_size(npages, sizeof(struct page *)),
+ GFP_KERNEL_ACCOUNT);
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!page_list || !umem)
+ goto unlock;
+
+ mmap_read_lock(current->mm);
+
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+ if (npages + atomic64_read(&current->mm->pinned_vm) > lock_limit)
+ goto out;
+
+ pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
+ page_list, NULL);
+ if (pinned != npages) {
+ ret = pinned < 0 ? pinned : -ENOMEM;
+ goto out;
+ }
+
+ ret = vduse_domain_add_user_bounce_pages(dev->domain,
+ page_list, pinned);
+ if (ret)
+ goto out;
+
+ atomic64_add(npages, &current->mm->pinned_vm);
+
+ umem->pages = page_list;
+ umem->npages = pinned;
+ umem->iova = iova;
+ umem->mm = current->mm;
+ mmgrab(current->mm);
+
+ dev->umem = umem;
+out:
+ if (ret && pinned > 0)
+ unpin_user_pages(page_list, pinned);
+
+ mmap_read_unlock(current->mm);
+unlock:
+ if (ret) {
+ vfree(page_list);
+ kfree(umem);
+ }
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1089,6 +1196,77 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
+ case VDUSE_IOTLB_REG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_reg_umem(dev, umem.iova,
+ umem.uaddr, umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_DEREG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_dereg_umem(dev, umem.iova,
+ umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_GET_INFO: {
+ struct vduse_iova_info info;
+ struct vhost_iotlb_map *map;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ ret = -EFAULT;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ break;
+
+ ret = -EINVAL;
+ if (info.start > info.last)
+ break;
+
+ if (!is_mem_zero((const char *)info.reserved,
+ sizeof(info.reserved)))
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ info.start, info.last);
+ if (map) {
+ info.start = map->start;
+ info.last = map->last;
+ info.capability = 0;
+ if (domain->bounce_map && map->start == 0 &&
+ map->last == domain->bounce_size - 1)
+ info.capability |= VDUSE_IOVA_CAP_UMEM;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ if (!map)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &info, sizeof(info)))
+ break;
+
+ ret = 0;
+ break;
+ }
default:
ret = -ENOIOCTLCMD;
break;
@@ -1101,6 +1279,7 @@ static int vduse_dev_release(struct inode *inode, struct file *file)
{
struct vduse_dev *dev = file->private_data;
+ vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
spin_lock(&dev->msg_lock);
/* Make sure the inflight messages can be processed after reconnection */
list_splice_init(&dev->recv_list, &dev->send_list);
@@ -1163,6 +1342,7 @@ static struct vduse_dev *vduse_dev_create(void)
return NULL;
mutex_init(&dev->lock);
+ mutex_init(&dev->mem_lock);
spin_lock_init(&dev->msg_lock);
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
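
To show the intended flow of the new ioctls (a hedged userspace sketch,
not part of the patch): query the IOVA region, check that it advertises
VDUSE_IOVA_CAP_UMEM, then register a page-aligned buffer covering the
whole bounce map, since partial registration is rejected above.
donate_bounce_buffer() is a hypothetical helper; see <linux/vduse.h> for
the authoritative structure layout.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

static int donate_bounce_buffer(int dev_fd, void *buf, uint64_t bounce_size)
{
	struct vduse_iova_info info = { .start = 0, .last = bounce_size - 1 };
	struct vduse_iova_umem umem = { 0 };

	if (ioctl(dev_fd, VDUSE_IOTLB_GET_INFO, &info) < 0)
		return -1;
	if (!(info.capability & VDUSE_IOVA_CAP_UMEM))
		return -1;			/* region cannot take user pages */

	umem.uaddr = (uint64_t)(uintptr_t)buf;	/* must be page-aligned */
	umem.iova = 0;				/* bounce map starts at IOVA 0 */
	umem.size = bounce_size;		/* must cover the whole map */
	return ioctl(dev_fd, VDUSE_IOTLB_REG_UMEM, &umem);
}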
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index fee73f3d9480..1a32357592e3 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
+vfio-y += vfio_main.o
+
obj-$(CONFIG_VFIO) += vfio.o
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
index 4ad63ececb91..7a29f572f93d 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -39,7 +39,7 @@ struct vfio_fsl_mc_device {
struct vfio_fsl_mc_irq *mc_irqs;
};
-extern int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
u32 flags, unsigned int index,
unsigned int start, unsigned int count,
void *data);
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 4def43f5f7b6..ea762e28c1cc 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1185,7 +1185,7 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
if (ret)
return ret;
- if (core_vdev->ops->migration_set_state) {
+ if (core_vdev->mig_ops) {
ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
if (ret) {
vfio_pci_core_disable(vdev);
@@ -1208,6 +1208,11 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
+ .migration_set_state = hisi_acc_vfio_pci_set_device_state,
+ .migration_get_state = hisi_acc_vfio_pci_get_device_state,
+};
+
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.name = "hisi-acc-vfio-pci-migration",
.open_device = hisi_acc_vfio_pci_open_device,
@@ -1219,8 +1224,6 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.mmap = hisi_acc_vfio_pci_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = hisi_acc_vfio_pci_set_device_state,
- .migration_get_state = hisi_acc_vfio_pci_get_device_state,
};
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
@@ -1272,6 +1275,8 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device
if (!ret) {
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
&hisi_acc_vfio_pci_migrn_ops);
+ hisi_acc_vdev->core_device.vdev.mig_ops =
+ &hisi_acc_vfio_pci_migrn_state_ops;
} else {
pci_warn(pdev, "migration support failed, continue with generic interface\n");
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 9b9f33ca270a..dd5d7bfe0a49 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -88,6 +88,16 @@ static int mlx5fv_vf_event(struct notifier_block *nb,
return 0;
}
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ mutex_lock(&mvdev->state_mutex);
+ mlx5vf_disable_fds(mvdev);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
if (!mvdev->migrate_cap)
@@ -98,7 +108,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
destroy_workqueue(mvdev->cb_wq);
}
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops)
{
struct pci_dev *pdev = mvdev->core_device.pdev;
int ret;
@@ -139,6 +150,7 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
mvdev->core_device.vdev.migration_flags =
VFIO_MIGRATION_STOP_COPY |
VFIO_MIGRATION_P2P;
+ mvdev->core_device.vdev.mig_ops = mig_ops;
end:
mlx5_vf_put_core_dev(mvdev->mdev);
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 6c3112fdd8b1..8208f4701a90 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -62,8 +62,10 @@ int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 0558d0649ddb..a9b63d15c5d3 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -570,10 +570,15 @@ static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
struct mlx5vf_pci_core_device *mvdev = container_of(
core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_cmd_close_migratable(mvdev);
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
+ .migration_set_state = mlx5vf_pci_set_device_state,
+ .migration_get_state = mlx5vf_pci_get_device_state,
+};
+
static const struct vfio_device_ops mlx5vf_pci_ops = {
.name = "mlx5-vfio-pci",
.open_device = mlx5vf_pci_open_device,
@@ -585,8 +590,6 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = mlx5vf_pci_set_device_state,
- .migration_get_state = mlx5vf_pci_get_device_state,
};
static int mlx5vf_pci_probe(struct pci_dev *pdev,
@@ -599,7 +602,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
if (!mvdev)
return -ENOMEM;
vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
- mlx5vf_cmd_set_migratable(mvdev);
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
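
The same conversion applies to any migration-capable variant driver; a
sketch with hypothetical foo_* names, not from this patch: the migration
callbacks move into their own ops table and are attached to the
vfio_device before registration instead of living in vfio_device_ops.

static const struct vfio_migration_ops foo_vfio_mig_ops = {
	.migration_set_state = foo_vfio_set_device_state,	/* hypothetical */
	.migration_get_state = foo_vfio_get_device_state,	/* hypothetical */
};

static int foo_vfio_probe_fragment(struct vfio_pci_core_device *vdev)
{
	/* before vfio_pci_core_register_device(), which now sanity-checks
	 * that both state callbacks and STOP_COPY support are present */
	vdev->vdev.mig_ops = &foo_vfio_mig_ops;
	vdev->vdev.migration_flags = VFIO_MIGRATION_STOP_COPY;
	return vfio_pci_core_register_device(vdev);
}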
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 9343f597182d..442d3ba4122b 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -222,7 +222,7 @@ static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
memcpy(vdev->vconfig + pos, &virt_val, count);
}
- /* Non-virtualzed and writable bits go to hardware */
+ /* Non-virtualized and writable bits go to hardware */
if (write & ~virt) {
struct pci_dev *pdev = vdev->pdev;
__le32 phys_val = 0;
@@ -1728,7 +1728,7 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
/*
* Config space, caps and ecaps are all dword aligned, so we could
* use one byte per dword to record the type. However, there are
- * no requiremenst on the length of a capability, so the gap between
+ * no requirements on the length of a capability, so the gap between
* capabilities needs byte granularity.
*/
map = kmalloc(pdev->cfg_size, GFP_KERNEL);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 0d8d4cdfb2f0..c8d3b0450fb3 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1868,6 +1868,13 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
+ if (vdev->vdev.mig_ops) {
+ if (!(vdev->vdev.mig_ops->migration_get_state &&
+ vdev->vdev.mig_ops->migration_set_state) ||
+ !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
+ return -EINVAL;
+ }
+
/*
* Prevent binding to PFs with VFs enabled, the VFs might be in use
* by the host or other users. We cannot capture the VFs if they
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index e163aa9f6144..0cbdcd14f1c8 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -151,7 +151,10 @@ int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
if (!vdev->vdev.kvm)
return 0;
- return kvm_s390_pci_register_kvm(zdev, vdev->vdev.kvm);
+ if (zpci_kvm_hook.kvm_register)
+ return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm);
+
+ return -ENOENT;
}
void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
@@ -161,5 +164,6 @@ void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
if (!zdev || !vdev->vdev.kvm)
return;
- kvm_s390_pci_unregister_kvm(zdev);
+ if (zpci_kvm_hook.kvm_unregister)
+ zpci_kvm_hook.kvm_unregister(zdev);
}
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 520d2a8e8375..691b43f4b2b2 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -78,21 +78,20 @@ struct vfio_platform_reset_node {
vfio_platform_reset_fn_t of_reset;
};
-extern int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev);
+int vfio_platform_probe_common(struct vfio_platform_device *vdev,
+ struct device *dev);
void vfio_platform_remove_common(struct vfio_platform_device *vdev);
-extern int vfio_platform_irq_init(struct vfio_platform_device *vdev);
-extern void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
+int vfio_platform_irq_init(struct vfio_platform_device *vdev);
+void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
-extern int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count,
- void *data);
+int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count, void *data);
-extern void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
-extern void vfio_platform_unregister_reset(const char *compat,
- vfio_platform_reset_fn_t fn);
+void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
+void vfio_platform_unregister_reset(const char *compat,
+ vfio_platform_reset_fn_t fn);
#define vfio_platform_register_reset(__compat, __reset) \
static struct vfio_platform_reset_node __reset ## _node = { \
.owner = THIS_MODULE, \
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index a67130221151..503bea6c843d 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -50,16 +50,15 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
+ struct page **pages);
+ void (*unpin_pages)(void *iommu_data,
+ dma_addr_t user_iova, int npage);
+ void (*register_device)(void *iommu_data,
+ struct vfio_device *vdev);
+ void (*unregister_device)(void *iommu_data,
+ struct vfio_device *vdev);
int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
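
Caller side of the reworked pin interface, as a hedged sketch not taken
from this patch: pinning is now by IOVA and yields struct page pointers,
and unpinning takes the same IOVA range instead of a pfn array.
touch_guest_page() is a hypothetical fragment of an emulated-IOMMU
(mdev-style) driver.

#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/highmem.h>

static int touch_guest_page(struct vfio_device *vdev, dma_addr_t iova)
{
	struct page *page;
	void *va;
	int ret;

	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	va = kmap_local_page(page);
	/* ... read or modify the pinned guest page ... */
	kunmap_local(va);

	vfio_unpin_pages(vdev, iova, 1);
	return 0;
}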
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 708a95e61831..169f07ac162d 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -378,8 +378,7 @@ static void tce_iommu_release(void *iommu_data)
kfree(container);
}
-static void tce_iommu_unuse_page(struct tce_container *container,
- unsigned long hpa)
+static void tce_iommu_unuse_page(unsigned long hpa)
{
struct page *page;
@@ -474,7 +473,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
}
- tce_iommu_unuse_page(container, oldhpa);
+ tce_iommu_unuse_page(oldhpa);
}
iommu_tce_kill(tbl, firstentry, pages);
@@ -524,7 +523,7 @@ static long tce_iommu_build(struct tce_container *container,
ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
&hpa, &dirtmp);
if (ret) {
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -532,7 +531,7 @@ static long tce_iommu_build(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
tce += IOMMU_PAGE_SIZE(tbl);
}
@@ -1266,7 +1265,10 @@ static int tce_iommu_attach_group(void *iommu_data,
goto unlock_exit;
}
- /* Check if new group has the same iommu_ops (i.e. compatible) */
+ /*
+ * Check if new group has the same iommu_table_group_ops
+ * (i.e. compatible)
+ */
list_for_each_entry(tcegrp, &container->group_list, next) {
struct iommu_table_group *table_group_tmp;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c13b9290e357..8706482665d1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -67,7 +67,8 @@ struct vfio_iommu {
struct list_head iova_list;
struct mutex lock;
struct rb_root dma_list;
- struct blocking_notifier_head notifier;
+ struct list_head device_list;
+ struct mutex device_list_lock;
unsigned int dma_avail;
unsigned int vaddr_invalid_count;
uint64_t pgsize_bitmap;
@@ -557,6 +558,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
pages, NULL, NULL);
if (ret > 0) {
+ int i;
+
+ /*
+ * The zero page is always resident, we don't need to pin it
+ * and it falls into our invalid/reserved test so we don't
+ * unpin in put_pfn(). Unpin all zero pages in the batch here.
+ */
+ for (i = 0 ; i < ret; i++) {
+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+ unpin_user_page(pages[i]);
+ }
+
*pfn = page_to_pfn(pages[0]);
goto done;
}
@@ -828,9 +841,9 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_iommu_type1_pin_pages(void *iommu_data,
struct iommu_group *iommu_group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn)
+ struct page **pages)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
@@ -840,7 +853,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
bool do_accounting;
dma_addr_t iova;
- if (!iommu || !user_pfn || !phys_pfn)
+ if (!iommu || !pages)
return -EINVAL;
/* Supported for v2 version only */
@@ -856,7 +869,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
again:
if (iommu->vaddr_invalid_count) {
for (i = 0; i < npage; i++) {
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
if (ret < 0)
goto pin_done;
@@ -865,8 +878,8 @@ again:
}
}
- /* Fail if notifier list is empty */
- if (!iommu->notifier.head) {
+ /* Fail if no dma_umap notifier is registered */
+ if (list_empty(&iommu->device_list)) {
ret = -EINVAL;
goto pin_done;
}
@@ -879,9 +892,10 @@ again:
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ unsigned long phys_pfn;
struct vfio_pfn *vpfn;
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma) {
ret = -EINVAL;
@@ -895,23 +909,25 @@ again:
vpfn = vfio_iova_get_vfio_pfn(dma, iova);
if (vpfn) {
- phys_pfn[i] = vpfn->pfn;
+ pages[i] = pfn_to_page(vpfn->pfn);
continue;
}
remote_vaddr = dma->vaddr + (iova - dma->iova);
- ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
+ ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
do_accounting);
if (ret)
goto pin_unwind;
- ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
if (ret) {
- if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+ if (put_pfn(phys_pfn, dma->prot) && do_accounting)
vfio_lock_acct(dma, -1, true);
goto pin_unwind;
}
+ pages[i] = pfn_to_page(phys_pfn);
+
if (iommu->dirty_page_tracking) {
unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
@@ -934,43 +950,38 @@ again:
goto pin_done;
pin_unwind:
- phys_pfn[i] = 0;
+ pages[i] = NULL;
for (j = 0; j < i; j++) {
dma_addr_t iova;
- iova = user_pfn[j] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * j;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
vfio_unpin_page_external(dma, iova, do_accounting);
- phys_pfn[j] = 0;
+ pages[j] = NULL;
}
pin_done:
mutex_unlock(&iommu->lock);
return ret;
}
-static int vfio_iommu_type1_unpin_pages(void *iommu_data,
- unsigned long *user_pfn,
- int npage)
+static void vfio_iommu_type1_unpin_pages(void *iommu_data,
+ dma_addr_t user_iova, int npage)
{
struct vfio_iommu *iommu = iommu_data;
bool do_accounting;
int i;
- if (!iommu || !user_pfn || npage <= 0)
- return -EINVAL;
-
/* Supported for v2 version only */
- if (!iommu->v2)
- return -EACCES;
+ if (WARN_ON(!iommu->v2))
+ return;
mutex_lock(&iommu->lock);
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ dma_addr_t iova = user_iova + PAGE_SIZE * i;
struct vfio_dma *dma;
- dma_addr_t iova;
- iova = user_pfn[i] << PAGE_SHIFT;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma)
break;
@@ -979,7 +990,8 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
}
mutex_unlock(&iommu->lock);
- return i > 0 ? i : -EINVAL;
+
+ WARN_ON(i != npage);
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
@@ -1287,6 +1299,35 @@ static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
return 0;
}
+/*
+ * Notify VFIO drivers using vfio_register_emulated_iommu_dev() to invalidate
+ * and unmap iovas within the range we're about to unmap. Drivers MUST unpin
+ * pages in response to an invalidation.
+ */
+static void vfio_notify_dma_unmap(struct vfio_iommu *iommu,
+ struct vfio_dma *dma)
+{
+ struct vfio_device *device;
+
+ if (list_empty(&iommu->device_list))
+ return;
+
+ /*
+ * The device is expected to call vfio_unpin_pages() for any IOVA it has
+ * pinned within the range. Since vfio_unpin_pages() will eventually
+ * call back down to this code and try to obtain the iommu->lock we must
+ * drop it.
+ */
+ mutex_lock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
+
+ list_for_each_entry(device, &iommu->device_list, iommu_entry)
+ device->ops->dma_unmap(device, dma->iova, dma->size);
+
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_lock(&iommu->lock);
+}
+
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap,
struct vfio_bitmap *bitmap)
@@ -1377,12 +1418,6 @@ again:
if (!iommu->v2 && iova > dma->iova)
break;
- /*
- * Task with same address space who mapped this iova range is
- * allowed to unmap the iova range.
- */
- if (dma->task->mm != current->mm)
- break;
if (invalidate_vaddr) {
if (dma->vaddr_invalid) {
@@ -1406,8 +1441,6 @@ again:
}
if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
- struct vfio_iommu_type1_dma_unmap nb_unmap;
-
if (dma_last == dma) {
BUG_ON(++retries > 10);
} else {
@@ -1415,20 +1448,7 @@ again:
retries = 0;
}
- nb_unmap.iova = dma->iova;
- nb_unmap.size = dma->size;
-
- /*
- * Notify anyone (mdev vendor drivers) to invalidate and
- * unmap iovas within the range we're about to unmap.
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- mutex_unlock(&iommu->lock);
- blocking_notifier_call_chain(&iommu->notifier,
- VFIO_IOMMU_NOTIFY_DMA_UNMAP,
- &nb_unmap);
- mutex_lock(&iommu->lock);
+ vfio_notify_dma_unmap(iommu, dma);
goto again;
}
@@ -1679,18 +1699,6 @@ out_unlock:
return ret;
}
-static int vfio_bus_type(struct device *dev, void *data)
-{
- struct bus_type **bus = data;
-
- if (*bus && *bus != dev->bus)
- return -EINVAL;
-
- *bus = dev->bus;
-
- return 0;
-}
-
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
@@ -2153,13 +2161,26 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
+/* Redundantly walks non-present capabilities to simplify caller */
+static int vfio_iommu_device_capable(struct device *dev, void *data)
+{
+ return device_iommu_capable(dev, (enum iommu_cap)data);
+}
+
+static int vfio_iommu_domain_alloc(struct device *dev, void *data)
+{
+ struct iommu_domain **domain = data;
+
+ *domain = iommu_domain_alloc(dev->bus);
+ return 1; /* Don't iterate */
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group, enum vfio_group_type type)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
@@ -2192,18 +2213,19 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_unlock;
}
- /* Determine bus_type in order to allocate a domain */
- ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
- if (ret)
- goto out_free_group;
-
ret = -ENOMEM;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
goto out_free_group;
+ /*
+ * Going via the iommu_group iterator avoids races, and trivially gives
+ * us a representative device for the IOMMU API call. We don't actually
+ * want to iterate beyond the first device (if any).
+ */
ret = -EIO;
- domain->domain = iommu_domain_alloc(bus);
+ iommu_group_for_each_dev(iommu_group, &domain->domain,
+ vfio_iommu_domain_alloc);
if (!domain->domain)
goto out_free_domain;
@@ -2258,7 +2280,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_add(&group->next, &domain->group_list);
msi_remap = irq_domain_check_msi_remap() ||
- iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+ iommu_group_for_each_dev(iommu_group, (void *)IOMMU_CAP_INTR_REMAP,
+ vfio_iommu_device_capable);
if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
@@ -2478,7 +2501,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&iommu->emulated_iommu_groups) &&
list_empty(&iommu->domain_list)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(&iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
}
goto detach_group_done;
@@ -2510,7 +2533,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
if (list_empty(&iommu->emulated_iommu_groups)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(
+ &iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
} else {
vfio_iommu_unmap_unpin_reaccount(iommu);
@@ -2571,7 +2595,8 @@ static void *vfio_iommu_type1_open(unsigned long arg)
iommu->dma_avail = dma_entry_limit;
iommu->container_open = true;
mutex_init(&iommu->lock);
- BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+ mutex_init(&iommu->device_list_lock);
+ INIT_LIST_HEAD(&iommu->device_list);
init_waitqueue_head(&iommu->vaddr_wait);
iommu->pgsize_bitmap = PAGE_MASK;
INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
@@ -3008,28 +3033,40 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
}
}
-static int vfio_iommu_type1_register_notifier(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb)
+static void vfio_iommu_type1_register_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- /* clear known events */
- *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-
- /* refuse to register if still events remaining */
- if (*events)
- return -EINVAL;
+ if (!vdev->ops->dma_unmap)
+ return;
- return blocking_notifier_chain_register(&iommu->notifier, nb);
+ /*
+ * list_empty(&iommu->device_list) is tested under iommu->lock, while
+ * iterating the list for dma_unmap must be done under device_list_lock.
+ * Holding both locks here lets several fast paths avoid taking
+ * device_list_lock at all; see vfio_notify_dma_unmap().
+ */
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_add(&vdev->iommu_entry, &iommu->device_list);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
-static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
- struct notifier_block *nb)
+static void vfio_iommu_type1_unregister_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- return blocking_notifier_chain_unregister(&iommu->notifier, nb);
+ if (!vdev->ops->dma_unmap)
+ return;
+
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_del(&vdev->iommu_entry);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
@@ -3163,8 +3200,8 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.detach_group = vfio_iommu_type1_detach_group,
.pin_pages = vfio_iommu_type1_pin_pages,
.unpin_pages = vfio_iommu_type1_unpin_pages,
- .register_notifier = vfio_iommu_type1_register_notifier,
- .unregister_notifier = vfio_iommu_type1_unregister_notifier,
+ .register_device = vfio_iommu_type1_register_device,
+ .unregister_device = vfio_iommu_type1_unregister_device,
.dma_rw = vfio_iommu_type1_dma_rw,
.group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
.notify = vfio_iommu_type1_notify,
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio_main.c
index 521a3eabf0e1..7cb56c382c97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio_main.c
@@ -231,6 +231,9 @@ int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
struct vfio_iommu_driver *driver, *tmp;
+ if (WARN_ON(!ops->register_device != !ops->unregister_device))
+ return -EINVAL;
+
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return -ENOMEM;
@@ -504,7 +507,9 @@ static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
if (IS_ERR(iommu_group))
return ERR_CAST(iommu_group);
- iommu_group_set_name(iommu_group, "vfio-noiommu");
+ ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
+ if (ret)
+ goto out_put_group;
ret = iommu_group_add_device(iommu_group, dev);
if (ret)
goto out_put_group;
@@ -554,7 +559,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
* restore cache coherency. It has to be checked here because it is only
* valid for cases where we are using iommu groups.
*/
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
iommu_group_put(iommu_group);
return ERR_PTR(-EINVAL);
}
@@ -1082,6 +1087,7 @@ static void vfio_device_unassign_container(struct vfio_device *device)
static struct file *vfio_device_open(struct vfio_device *device)
{
+ struct vfio_iommu_driver *iommu_driver;
struct file *filep;
int ret;
@@ -1112,6 +1118,12 @@ static struct file *vfio_device_open(struct vfio_device *device)
if (ret)
goto err_undo_count;
}
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->register_device)
+ iommu_driver->ops->register_device(
+ device->group->container->iommu_data, device);
+
up_read(&device->group->group_rwsem);
}
mutex_unlock(&device->dev_set->lock);
@@ -1146,13 +1158,19 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
down_read(&device->group->group_rwsem);
- if (device->open_count == 1 && device->ops->close_device)
+ if (device->open_count == 1 && device->ops->close_device) {
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
+ }
err_undo_count:
+ up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0 && device->kvm)
device->kvm = NULL;
- up_read(&device->group->group_rwsem);
mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
err_unassign_container:
@@ -1342,12 +1360,18 @@ static const struct file_operations vfio_group_fops = {
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
+ struct vfio_iommu_driver *iommu_driver;
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
down_read(&device->group->group_rwsem);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0)
@@ -1544,8 +1568,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
struct file *filp = NULL;
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz,
@@ -1561,7 +1584,8 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
if (flags & VFIO_DEVICE_FEATURE_GET) {
enum vfio_device_mig_state curr_state;
- ret = device->ops->migration_get_state(device, &curr_state);
+ ret = device->mig_ops->migration_get_state(device,
+ &curr_state);
if (ret)
return ret;
mig.device_state = curr_state;
@@ -1569,7 +1593,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
}
/* Handle the VFIO_DEVICE_FEATURE_SET */
- filp = device->ops->migration_set_state(device, mig.device_state);
+ filp = device->mig_ops->migration_set_state(device, mig.device_state);
if (IS_ERR(filp) || !filp)
goto out_copy;
@@ -1592,8 +1616,7 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
};
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
@@ -1815,6 +1838,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
+ caps->buf = NULL;
caps->size = 0;
return ERR_PTR(-ENOMEM);
}
@@ -1913,26 +1937,25 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
- * Pin a set of guest PFNs and return their associated host PFNs for local
+ * Pin contiguous user pages and return their associated host pages for local
* domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be pinned.
- * @npage [in] : count of elements in user_pfn array. This count should not
- * be greater VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @iova [in] : starting IOVA of user pages to be pinned.
+ * @npage [in] : count of pages to be pinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
* @prot [in] : protection flags
- * @phys_pfn[out]: array of host PFNs
+ * @pages[out] : array of host pages
* Return error or number of pages pinned.
*/
-int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn)
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages)
{
struct vfio_container *container;
struct vfio_group *group = device->group;
struct vfio_iommu_driver *driver;
int ret;
- if (!user_pfn || !phys_pfn || !npage ||
- !vfio_assert_device_open(device))
+ if (!pages || !npage || !vfio_assert_device_open(device))
return -EINVAL;
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -1946,8 +1969,8 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, user_pfn,
- npage, prot, phys_pfn);
+ group->iommu_group, iova,
+ npage, prot, pages);
else
ret = -ENOTTY;
@@ -1956,37 +1979,28 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
EXPORT_SYMBOL(vfio_pin_pages);
/*
- * Unpin set of host PFNs for local domain only.
+ * Unpin contiguous host pages for local domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
- * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * @npage [in] : count of elements in user_pfn array. This count should not
+ * @iova [in] : starting address of user pages to be unpinned.
+ * @npage [in] : count of pages to be unpinned. This count should not
* be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * Return error or number of pages unpinned.
*/
-int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage)
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
struct vfio_container *container;
struct vfio_iommu_driver *driver;
- int ret;
- if (!user_pfn || !npage || !vfio_assert_device_open(device))
- return -EINVAL;
+ if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
+ return;
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
+ if (WARN_ON(!vfio_assert_device_open(device)))
+ return;
/* group->container cannot change while a vfio device is open */
container = device->group->container;
driver = container->iommu_driver;
- if (likely(driver && driver->ops->unpin_pages))
- ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
- npage);
- else
- ret = -ENOTTY;
- return ret;
+ driver->ops->unpin_pages(container->iommu_data, iova, npage);
}
EXPORT_SYMBOL(vfio_unpin_pages);
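A caller-side sketch of the reworked pin/unpin pair (vdev and iova are
assumed to exist; the prot flags are the usual IOMMU_READ/IOMMU_WRITE):

	struct page *pages[2];
	int ret;

	/* Pin two contiguous user pages starting at iova. */
	ret = vfio_pin_pages(vdev, iova, 2, IOMMU_READ | IOMMU_WRITE, pages);
	if (ret == 2) {
		/* ... use page_to_phys(pages[0]), etc. ... */
		vfio_unpin_pages(vdev, iova, 2);
	}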
@@ -2001,13 +2015,13 @@ EXPORT_SYMBOL(vfio_unpin_pages);
* not a real device DMA, it is not necessary to pin the user space memory.
*
* @device [in] : VFIO device
- * @user_iova [in] : base IOVA of a user space buffer
+ * @iova [in] : base IOVA of a user space buffer
* @data [in] : pointer to kernel buffer
* @len [in] : kernel buffer length
* @write : indicate read or write
* Return error code on failure or 0 on success.
*/
-int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
size_t len, bool write)
{
struct vfio_container *container;
@@ -2023,97 +2037,13 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
if (likely(driver && driver->ops->dma_rw))
ret = driver->ops->dma_rw(container->iommu_data,
- user_iova, data, len, write);
+ iova, data, len, write);
else
ret = -ENOTTY;
return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
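Likewise, a hedged usage sketch for vfio_dma_rw() with the renamed
parameter (the buffer size and iova are assumptions):

	u8 buf[256];
	int ret;

	/* Read 256 bytes of the user's IOVA space into a kernel buffer. */
	ret = vfio_dma_rw(vdev, iova, buf, sizeof(buf), false);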
-static int vfio_register_iommu_notifier(struct vfio_group *group,
- unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->register_notifier))
- ret = driver->ops->register_notifier(container->iommu_data,
- events, nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-static int vfio_unregister_iommu_notifier(struct vfio_group *group,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->unregister_notifier))
- ret = driver->ops->unregister_notifier(container->iommu_data,
- nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-int vfio_register_notifier(struct vfio_device *device,
- enum vfio_notify_type type, unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !events || (*events == 0) ||
- !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_register_iommu_notifier(group, events, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_register_notifier);
-
-int vfio_unregister_notifier(struct vfio_device *device,
- enum vfio_notify_type type,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_unregister_iommu_notifier(group, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_unregister_notifier);
-
/*
* Module/class support
*/
@@ -2159,13 +2089,17 @@ static int __init vfio_init(void)
if (ret)
goto err_alloc_chrdev;
- pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
#ifdef CONFIG_VFIO_NOIOMMU
- vfio_register_iommu_driver(&vfio_noiommu_ops);
+ ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
+ if (ret)
+ goto err_driver_register;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
+err_driver_register:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ffd9e6c2ffc1..7ebf106d50c1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -159,9 +159,13 @@ enum {
};
#define VHOST_SCSI_MAX_TARGET 256
-#define VHOST_SCSI_MAX_VQ 128
+#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128
+static unsigned vhost_scsi_max_io_vqs = 128;
+module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
+MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
+
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
/*
@@ -186,7 +190,9 @@ struct vhost_scsi {
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
- struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+ struct vhost_scsi_virtqueue *vqs;
+ unsigned long *compl_bitmap;
+ struct vhost_scsi_inflight **old_inflight;
struct vhost_work vs_completion_work; /* cmd completion work item */
struct llist_head vs_completion_list; /* cmd completion queue */
@@ -245,7 +251,7 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_virtqueue *vq;
int idx, i;
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
@@ -533,7 +539,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_completion_work);
- DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp;
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
@@ -541,7 +546,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
struct iov_iter iov_iter;
int ret, vq;
- bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+ bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
llnode = llist_del_all(&vs->vs_completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -566,7 +571,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
vq = q - vs->vqs;
- __set_bit(vq, signal);
+ __set_bit(vq, vs->compl_bitmap);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -574,8 +579,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
vq = -1;
- while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
- < VHOST_SCSI_MAX_VQ)
+ while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
+ < vs->dev.nvqs)
vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
@@ -643,14 +648,12 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
size_t offset;
unsigned int npages = 0;
- bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+ bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
- iov_iter_advance(iter, bytes);
-
while (bytes) {
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
sg_set_page(sg++, pages[npages++], n, offset);
@@ -1421,26 +1424,25 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
int i;
/* Init new inflight and remember the old inflight */
- vhost_scsi_init_inflight(vs, old_inflight);
+ vhost_scsi_init_inflight(vs, vs->old_inflight);
/*
* The inflight->kref was initialized to 1. We decrement it here to
* indicate the start of the flush operation so that it will reach 0
* when all the reqs are finished.
*/
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- wait_for_completion(&old_inflight[i]->comp);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
@@ -1603,7 +1605,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
if (!vhost_vq_is_setup(vq))
continue;
@@ -1613,7 +1615,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
goto destroy_vq_cmds;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, vs_tpg);
@@ -1715,7 +1717,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
target_undepend_item(&se_tpg->tpg_group.cg_item);
}
if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
@@ -1724,7 +1726,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
/* Make sure cmds are not running before tearing them down. */
vhost_scsi_flush(vs);
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
@@ -1764,7 +1766,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
@@ -1778,16 +1780,40 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
- int r = -ENOMEM, i;
+ int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
goto err_vs;
- vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
- if (!vqs)
+ if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
+ pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
+ VHOST_SCSI_MAX_IO_VQ);
+ nvqs = VHOST_SCSI_MAX_IO_VQ;
+ } else if (nvqs == 0) {
+ pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
+ nvqs = 1;
+ }
+ nvqs += VHOST_SCSI_VQ_IO;
+
+ vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
+ if (!vs->compl_bitmap)
+ goto err_compl_bitmap;
+
+ vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->old_inflight)
+ goto err_inflight;
+
+ vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->vqs)
goto err_vqs;
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_local_vqs;
+
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
@@ -1798,11 +1824,11 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
VHOST_SCSI_WEIGHT, 0, true, NULL);
vhost_scsi_init_inflight(vs, NULL);
@@ -1810,7 +1836,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
f->private_data = vs;
return 0;
+err_local_vqs:
+ kfree(vs->vqs);
err_vqs:
+ kfree(vs->old_inflight);
+err_inflight:
+ bitmap_free(vs->compl_bitmap);
+err_compl_bitmap:
kvfree(vs);
err_vs:
return r;
@@ -1828,6 +1860,9 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
kfree(vs->dev.vqs);
+ kfree(vs->vqs);
+ kfree(vs->old_inflight);
+ bitmap_free(vs->compl_bitmap);
kvfree(vs);
return 0;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 23dcbfdfa13b..166044642fd5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -347,6 +347,14 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
return 0;
}
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->suspend;
+}
+
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -470,6 +478,22 @@ static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+/* After this ioctl returns successfully, the device must not process any
+ * more virtqueue descriptors. The device can still answer reads and writes
+ * of config fields as if it were not suspended. In particular, writing a
+ * value of 1 to "queue_enable" will not make the device start processing
+ * buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ops->suspend(vdpa);
+}
+
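From userspace the new ioctl composes with the backend-feature negotiation
handled below; a sketch under the assumption that vdpa_fd is an open
/dev/vhost-vdpa-N descriptor:

	uint64_t feats;

	if (!ioctl(vdpa_fd, VHOST_GET_BACKEND_FEATURES, &feats) &&
	    (feats & (1ULL << VHOST_BACKEND_F_SUSPEND))) {
		ioctl(vdpa_fd, VHOST_SET_BACKEND_FEATURES, &feats);
		/* Device stops processing descriptors on success. */
		ioctl(vdpa_fd, VHOST_VDPA_SUSPEND);
	}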
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -577,7 +601,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if (cmd == VHOST_SET_BACKEND_FEATURES) {
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+ !vhost_vdpa_can_suspend(v))
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
@@ -628,6 +656,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
+ if (vhost_vdpa_can_suspend(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
@@ -640,6 +670,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_GET_VQS_COUNT:
r = vhost_vdpa_get_vqs_count(v, argp);
break;
+ case VHOST_VDPA_SUSPEND:
+ r = vhost_vdpa_suspend(v);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -1076,7 +1109,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
if (!bus)
return -EFAULT;
- if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
return -ENOTSUPP;
v->domain = iommu_domain_alloc(bus);
@@ -1363,6 +1396,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
err:
put_device(&v->dev);
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index eab55accf381..11f59dd06a74 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
static int iotlb_translate(const struct vringh *vrh,
- u64 addr, u64 len, struct bio_vec iov[],
+ u64 addr, u64 len, u64 *translated,
+ struct bio_vec iov[],
int iov_size, u32 perm)
{
struct vhost_iotlb_map *map;
@@ -1136,43 +1137,76 @@ static int iotlb_translate(const struct vringh *vrh,
spin_unlock(vrh->iotlb_lock);
+ if (translated)
+ *translated = min(len, s);
+
return ret;
}
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
- len, iov, 16, VHOST_MAP_RO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
- iov_iter_bvec(&iter, READ, iov, ret, len);
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- ret = copy_from_iter(dst, len, &iter);
+ iov_iter_bvec(&iter, READ, iov, ret, translated);
- return ret;
+ ret = copy_from_iter(dst, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
}
static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
- len, iov, 16, VHOST_MAP_WO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- iov_iter_bvec(&iter, WRITE, iov, ret, len);
+ iov_iter_bvec(&iter, WRITE, iov, ret, translated);
+
+ ret = copy_to_iter(src, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
- return copy_to_iter(src, len, &iter);
+ return total_translated;
}
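Worked example (entry size assumed): with a 16-slot bio_vec array and 4 KiB
IOTLB mappings, a 100 KiB copy that previously hit -ENOBUFS once the 16
slots filled now completes in two passes, translating 64 KiB and then
36 KiB, each pass bounded by ARRAY_SIZE(iov) entries.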
static inline int getu16_iotlb(const struct vringh *vrh,
@@ -1183,7 +1217,7 @@ static inline int getu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic read is needed for getu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_RO);
if (ret < 0)
return ret;
@@ -1204,7 +1238,7 @@ static inline int putu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic write is needed for putu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_WO);
if (ret < 0)
return ret;
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 2b9e2bbbb03e..fc02c5c16055 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -218,9 +218,8 @@ err:
static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
- unsigned int period = lp->pdata->period_ns;
- unsigned int duty = br * period / max_br;
struct pwm_device *pwm;
+ struct pwm_state state;
/* request pwm device with the consumer name */
if (!lp->pwm) {
@@ -230,18 +229,16 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
lp->pwm = pwm;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
+ pwm_init_state(lp->pwm, &state);
+ } else {
+ pwm_get_state(lp->pwm, &state);
}
- pwm_config(lp->pwm, duty, period);
- if (duty)
- pwm_enable(lp->pwm);
- else
- pwm_disable(lp->pwm);
+ state.period = lp->pdata->period_ns;
+ state.duty_cycle = div_u64(br * state.period, max_br);
+ state.enabled = state.duty_cycle;
+
+ pwm_apply_state(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index b2bfbf070200..dc37494baf42 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -12,7 +12,6 @@
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
-#include <linux/of.h>
#include <linux/slab.h>
#include <video/platform_lcd.h>
@@ -133,19 +132,10 @@ static int platform_lcd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(platform_lcd_pm_ops, platform_lcd_suspend,
platform_lcd_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id platform_lcd_of_match[] = {
- { .compatible = "platform-lcd" },
- {},
-};
-MODULE_DEVICE_TABLE(of, platform_lcd_of_match);
-#endif
-
static struct platform_driver platform_lcd_driver = {
.driver = {
.name = "platform-lcd",
.pm = &platform_lcd_pm_ops,
- .of_match_table = of_match_ptr(platform_lcd_of_match),
},
.probe = platform_lcd_probe,
};
diff --git a/drivers/video/backlight/rt4831-backlight.c b/drivers/video/backlight/rt4831-backlight.c
index 42155c7d2db1..eb8c59e8713f 100644
--- a/drivers/video/backlight/rt4831-backlight.c
+++ b/drivers/video/backlight/rt4831-backlight.c
@@ -12,6 +12,7 @@
#define RT4831_REG_BLCFG 0x02
#define RT4831_REG_BLDIML 0x04
#define RT4831_REG_ENABLE 0x08
+#define RT4831_REG_BLOPT2 0x11
#define RT4831_BLMAX_BRIGHTNESS 2048
@@ -23,6 +24,11 @@
#define RT4831_BLDIML_MASK GENMASK(2, 0)
#define RT4831_BLDIMH_MASK GENMASK(10, 3)
#define RT4831_BLDIMH_SHIFT 3
+#define RT4831_BLOCP_MASK GENMASK(1, 0)
+
+#define RT4831_BLOCP_MINUA 900000
+#define RT4831_BLOCP_MAXUA 1800000
+#define RT4831_BLOCP_STEPUA 300000
struct rt4831_priv {
struct device *dev;
@@ -85,7 +91,7 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
{
struct device *dev = priv->dev;
u8 propval;
- u32 brightness;
+ u32 brightness, ocp_uA;
unsigned int val = 0;
int ret;
@@ -120,6 +126,31 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
if (ret)
return ret;
+ /*
+ * This OCP level is used to protect and limit the inductor current.
+ * If the inductor peak current reaches this level, the low-side MOSFET
+ * is turned off and the output channel current may be limited as well.
+ * To sustain the configured channel current, the inductor chosen must
+ * be rated above the OCP level.
+ *
+ * Unlike the OVP level, whose 21V default suits most applications, an
+ * OCP level chosen smaller than needed will also cap the backlight
+ * channel output current below the register setting. For example,
+ * requesting 1200000 uA programs DIV_ROUND_UP(1200000 - 900000,
+ * 300000) = 1 into the BLOCP field.
+ */
+ ret = device_property_read_u32(dev, "richtek,bled-ocp-microamp",
+ &ocp_uA);
+ if (!ret) {
+ ocp_uA = clamp_val(ocp_uA, RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_MAXUA);
+ val = DIV_ROUND_UP(ocp_uA - RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_STEPUA);
+ ret = regmap_update_bits(priv->regmap, RT4831_REG_BLOPT2,
+ RT4831_BLOCP_MASK, val);
+ if (ret)
+ return ret;
+ }
+
ret = device_property_read_u8(dev, "richtek,channel-use", &propval);
if (ret) {
dev_err(dev, "richtek,channel-use DT property missing\n");
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index bd4dc97d4d34..db568f67e4dc 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -290,7 +290,7 @@ static char default_sti_path[21] __read_mostly;
static int __init sti_setup(char *str)
{
if (str)
- strlcpy (default_sti_path, str, sizeof (default_sti_path));
+ strscpy(default_sti_path, str, sizeof(default_sti_path));
return 1;
}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 576612f18d59..fcdf017e2665 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -75,7 +75,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
-static struct uni_pagedir *vgacon_uni_pagedir;
+static struct uni_pagedict *vgacon_uni_pagedir;
static int vgacon_refcount;
/* Description of the hardware situation */
@@ -342,7 +342,7 @@ static const char *vgacon_startup(void)
static void vgacon_init(struct vc_data *c, int init)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
/*
* We cannot be loaded as a module, therefore init will be 1
@@ -367,10 +367,10 @@ static void vgacon_init(struct vc_data *c, int init)
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
- p = *c->vc_uni_pagedir_loc;
- if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) {
+ p = *c->uni_pagedict_loc;
+ if (c->uni_pagedict_loc != &vgacon_uni_pagedir) {
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &vgacon_uni_pagedir;
+ c->uni_pagedict_loc = &vgacon_uni_pagedir;
vgacon_refcount++;
}
if (!vgacon_uni_pagedir && p)
@@ -392,7 +392,7 @@ static void vgacon_deinit(struct vc_data *c)
if (!--vgacon_refcount)
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &c->vc_uni_pagedir;
+ c->uni_pagedict_loc = &c->uni_pagedict;
con_set_default_unimap(c);
}
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 9811f1bad8d4..7db03ed77c76 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -84,9 +84,6 @@ static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = {
/*
* Interface used by the world
*/
-int mc68x328fb_init(void);
-int mc68x328fb_setup(char *);
-
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mc68x328fb_set_par(struct fb_info *info);
@@ -403,7 +400,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#endif
}
-int __init mc68x328fb_setup(char *options)
+static int __init mc68x328fb_setup(char *options)
{
if (!options || !*options)
return 1;
@@ -414,7 +411,7 @@ int __init mc68x328fb_setup(char *options)
* Initialisation
*/
-int __init mc68x328fb_init(void)
+static int __init mc68x328fb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 8080116aea84..f65c96d1394d 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -698,16 +698,18 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
return -ENODEV;
panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel)
- return -ENODEV;
+ if (!panel) {
+ err = -ENODEV;
+ goto out_endpoint_put;
+ }
err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
&max_bandwidth);
@@ -736,11 +738,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
- return -ENOENT;
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
+ err = -ENOENT;
+ goto out_panel_put;
+ }
+
+ of_node_put(panel);
+ of_node_put(endpoint);
return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
tft_r0b0g0[1], tft_r0b0g0[2]);
+out_panel_put:
+ of_node_put(panel);
+out_endpoint_put:
+ of_node_put(endpoint);
+ return err;
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 6e07a97bbd31..d88265dbebf4 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -2540,27 +2540,16 @@ static int amifb_blank(int blank, struct fb_info *info)
static int amifb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
- } else {
+ if (!(var->vmode & FB_VMODE_YWRAP)) {
/*
* TODO: There will be problems when xpan!=1, so some columns
* on the right side will never be seen
*/
if (var->xoffset + info->var.xres >
- upx(16 << maxfmode, info->var.xres_virtual) ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
+ upx(16 << maxfmode, info->var.xres_virtual))
return -EINVAL;
}
ami_pan_var(var, info);
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index eb3e47c58c5f..a2a381631628 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -781,7 +781,12 @@ static int arkfb_set_par(struct fb_info *info)
return -EINVAL;
}
- ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ value = (hdiv * info->var.pixclock) / hmul;
+ if (!value) {
+ fb_dbg(info, "invalid pixclock\n");
+ value = 1;
+ }
+ ark_set_pixclock(info, value);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
@@ -792,6 +797,8 @@ static int arkfb_set_par(struct fb_info *info)
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 52a35b661643..2bc4089865e6 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -236,8 +236,6 @@ static int *MV300_reg = MV300_reg_8bit;
#endif /* ATAFB_EXT */
-static int inverse;
-
/*
* struct fb_ops {
* * open/release and usage marking
@@ -467,27 +465,27 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 320x200, 15 kHz, 60 Hz (ST low) */
"st-low", 60, 320, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x200, 15 kHz, 60 Hz (ST medium) */
"st-mid", 60, 640, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 30.25 kHz, 63.5 Hz (ST high) */
"st-high", 63, 640, 400, 32000, 128, 0, 40, 14, 128, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 320x480, 15 kHz, 60 Hz (TT low) */
"tt-low", 60, 320, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x480, 29 kHz, 57 Hz (TT medium) */
"tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 1280x960, 72 kHz, 72 Hz (TT high) */
- "tt-high", 57, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "tt-high", 72, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
+ 0, FB_VMODE_NONINTERLACED
},
/*
@@ -496,12 +494,12 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 63.5, 640, 480, 32000, 18, 42, 31, 11, 96, 3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga", 60, 640, 480, 39721, 42, 18, 31, 11, 100, 3,
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, 32000, 18, 42, 31, 11, 96, 3,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga70", 70, 640, 400, 39721, 42, 18, 31, 11, 100, 3,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED
},
/*
@@ -511,7 +509,7 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 896x608, 31 kHz, 60 Hz (Falcon High) */
"falh", 60, 896, 608, 32000, 18, 42, 31, 1, 96,3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
},
};
@@ -1010,10 +1008,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
else if (yres_virtual < yres)
yres_virtual = yres;
- /* backward bug-compatibility */
- if (var->pixclock > 1)
- var->pixclock -= 1;
-
par->hw.falcon.line_width = bpp * xres / 16;
par->hw.falcon.line_offset = bpp * (xres_virtual - xres) / 16;
@@ -1072,8 +1066,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
xstretch = 2; /* Double pixel width only for hicolor */
/* Default values are used for vert./hor. timing if no pixelclock given. */
if (var->pixclock == 0) {
- int linesize;
-
/* Choose master pixelclock depending on hor. timing */
plen = 1 * xstretch;
if ((plen * xres + f25.right + f25.hsync + f25.left) *
@@ -1092,7 +1084,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
left_margin = pclock->left / plen;
right_margin = pclock->right / plen;
hsync_len = pclock->hsync / plen;
- linesize = left_margin + xres + right_margin + hsync_len;
upper_margin = 31;
lower_margin = 11;
vsync_len = 3;
@@ -1641,7 +1632,7 @@ static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
static int falcon_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int xoffset;
int bpp = info->var.bits_per_pixel;
@@ -2208,6 +2199,10 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
if (regno > 255)
return 1;
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
@@ -2261,7 +2256,7 @@ static void set_screen_base(void *s_base)
static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
if (!fbhw->set_screen_base ||
(!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
@@ -2407,55 +2402,19 @@ static void atafb_set_disp(struct fb_info *info)
static int
atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- int xoffset = var->xoffset;
- int yoffset = var->yoffset;
- int err;
-
- if (var->vmode & FB_VMODE_YWRAP) {
- if (yoffset < 0 || yoffset >= info->var.yres_virtual || xoffset)
- return -EINVAL;
- } else {
- if (xoffset + info->var.xres > info->var.xres_virtual ||
- yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
- }
-
- if (fbhw->pan_display) {
- err = fbhw->pan_display(var, info);
- if (err)
- return err;
- } else
+ if (!fbhw->pan_display)
return -EINVAL;
- info->var.xoffset = xoffset;
- info->var.yoffset = yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
-
- return 0;
+ return fbhw->pan_display(var, info);
}
/*
* generic drawing routines; imageblit needs updating for image depth > 1
*/
-#if BITS_PER_LONG == 32
-#define BYTES_PER_LONG 4
-#define SHIFT_PER_LONG 5
-#elif BITS_PER_LONG == 64
-#define BYTES_PER_LONG 8
-#define SHIFT_PER_LONG 6
-#else
-#define Please update me
-#endif
-
-
static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 width, height;
@@ -2498,7 +2457,7 @@ static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 dx, dy, sx, sy, width, height;
int rev_copy = 0;
@@ -2552,10 +2511,8 @@ static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
- unsigned long *dst;
- int dst_idx;
const char *src;
u32 dx, dy, width, height, pitch;
@@ -2582,10 +2539,6 @@ static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth == 1) {
// used for font data
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
- dst_idx += dy * par->next_line * 8 + dx;
src = image->data;
pitch = (image->width + 7) / 8;
while (height--) {
@@ -2622,14 +2575,14 @@ atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
switch (cmd) {
#ifdef FBCMD_GET_CURRENTPAR
case FBCMD_GET_CURRENTPAR:
- if (copy_to_user((void *)arg, (void *)&current_par,
+ if (copy_to_user((void *)arg, &current_par,
sizeof(struct atafb_par)))
return -EFAULT;
return 0;
#endif
#ifdef FBCMD_SET_CURRENTPAR
case FBCMD_SET_CURRENTPAR:
- if (copy_from_user((void *)&current_par, (void *)arg,
+ if (copy_from_user(&current_par, (void *)arg,
sizeof(struct atafb_par)))
return -EFAULT;
ata_set_par(&current_par);
@@ -2695,7 +2648,7 @@ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
* hw par just decoded */
static int atafb_set_par(struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
@@ -2981,7 +2934,7 @@ static void __init atafb_setup_user(char *spec)
}
}
-int __init atafb_setup(char *options)
+static int __init atafb_setup(char *options)
{
char *this_opt;
int temp;
@@ -2996,7 +2949,7 @@ int __init atafb_setup(char *options)
default_par = temp;
mode_option = this_opt;
} else if (!strcmp(this_opt, "inverse"))
- inverse = 1;
+ fb_invert_cmaps();
else if (!strncmp(this_opt, "hwscroll_", 9)) {
hwscroll = simple_strtoul(this_opt + 9, NULL, 10);
if (hwscroll < 0)
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a3e6faed7745..14eb718bd67c 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3891,7 +3891,7 @@ static int __init atyfb_setup(char *options)
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
- strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
+ strscpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 6851f47613e1..a14a8d73035c 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1980,7 +1980,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
/* Fill fix common fields */
- strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
+ strscpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
info->fix.smem_len = rinfo->video_ram;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -2094,34 +2094,34 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tmp;
/* framebuffer size */
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
(rinfo->family == CHIP_FAMILY_RS200) ||
(rinfo->family == CHIP_FAMILY_RS300) ||
(rinfo->family == CHIP_FAMILY_RC410) ||
(rinfo->family == CHIP_FAMILY_RS400) ||
(rinfo->family == CHIP_FAMILY_RS480) ) {
- u32 tom = INREG(NB_TOM);
- tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
-
- radeon_fifo_wait(6);
- OUTREG(MC_FB_LOCATION, tom);
- OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
-
- /* This is supposed to fix the crtc2 noise problem. */
- OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
-
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
- (rinfo->family == CHIP_FAMILY_RS200)) {
- /* This is to workaround the asic bug for RMX, some versions
- of BIOS doesn't have this register initialized correctly.
- */
- OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
- ~CRTC_H_CUTOFF_ACTIVE_EN);
- }
- } else {
- tmp = INREG(CNFG_MEMSIZE);
+ u32 tom = INREG(NB_TOM);
+
+ tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+ radeon_fifo_wait(6);
+ OUTREG(MC_FB_LOCATION, tom);
+ OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
+
+ /* This is supposed to fix the crtc2 noise problem. */
+ OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
+
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ (rinfo->family == CHIP_FAMILY_RS200)) {
+ /* This works around an ASIC bug for RMX; some BIOS
+ * versions do not initialize this register correctly.
+ */
+ OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
+ ~CRTC_H_CUTOFF_ACTIVE_EN);
+ }
+ } else {
+ tmp = INREG(CNFG_MEMSIZE);
}
/* mem size is bits [28:0], mask off the rest */
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index e7702fe1fe7d..6403ae07970d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -182,7 +182,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void bw2_init_fix(struct fb_info *info, int linebytes)
{
- strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
+ strscpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_MONO01;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894af26f8..2b00a9d554fc 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 51e072c03e1c..2a9fa06881b5 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1999,7 +1999,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
}
/* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ strscpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
sizeof(info->fix.id));
/* monochrome: only 1 memory plane */
@@ -2301,7 +2301,7 @@ err_release_fb:
return error;
}
-void cirrusfb_zorro_unregister(struct zorro_dev *z)
+static void cirrusfb_zorro_unregister(struct zorro_dev *z)
{
struct fb_info *info = zorro_get_drvdata(z);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 771ce1f76951..a1061c2f1640 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -326,7 +326,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
info->var.vmode = FB_VMODE_NONINTERLACED;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.accel = FB_ACCEL_NONE;
- strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
fb_videomode_to_var(&info->var, &cfb->mode);
ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index f114242b5a70..098b62f7b701 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -412,7 +412,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5)) {
- strlcpy(fontname, options + 5, sizeof(fontname));
+ strscpy(fontname, options + 5, sizeof(fontname));
continue;
}
@@ -1060,9 +1060,9 @@ static void fbcon_init(struct vc_data *vc, int init)
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
ops = info->fbcon_par;
@@ -1384,9 +1384,9 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -2401,15 +2401,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
- int resize;
+ int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
+ old_userfont = p->userfont;
if ((p->userfont = userfont))
REFCOUNT(data)++;
+
+ old_width = vc->vc_font.width;
+ old_height = vc->vc_font.height;
+ old_charcount = vc->vc_font.charcount;
+
vc->vc_font.width = w;
vc->vc_font.height = h;
vc->vc_font.charcount = charcount;
@@ -2425,7 +2431,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
- vc_resize(vc, cols, rows);
+ ret = vc_resize(vc, cols, rows);
+ if (ret)
+ goto err_out;
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2435,6 +2443,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (old_data && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
+
+err_out:
+ p->fontdata = old_data;
+ vc->vc_font.data = (void *)old_data;
+
+ if (userfont) {
+ p->userfont = old_userfont;
+ REFCOUNT(data)--;
+ }
+
+ vc->vc_font.width = old_width;
+ vc->vc_font.height = old_height;
+ vc->vc_font.charcount = old_charcount;
+
+ return ret;
}
/*
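vc_resize() allocates a new screen buffer and can fail (typically with -ENOMEM), yet fbcon_do_set_font() used to commit the new font before calling it, leaving vc->vc_font inconsistent with the display on failure. The fix above snapshots every field it is about to change and restores them all on the error path. The save/mutate/rollback skeleton, trimmed to the font geometry (a sketch, not the full function):

static int do_set_font_sketch(struct vc_data *vc, int w, int h,
			      int cols, int rows)
{
	int old_w = vc->vc_font.width;
	int old_h = vc->vc_font.height;
	int ret;

	vc->vc_font.width = w;
	vc->vc_font.height = h;

	ret = vc_resize(vc, cols, rows);
	if (ret) {
		/* undo every mutation made since the snapshot */
		vc->vc_font.width = old_w;
		vc->vc_font.height = old_h;
	}
	return ret;
}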
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index c2a60b187467..4d7f63892dcc 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
if (WARN_ON(refcount_read(&info->count)))
return;
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ mutex_destroy(&info->bl_curve_mutex);
+#endif
+
kfree(info->apertures);
kfree(info);
}
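The mutex destroyed here is the one framebuffer_alloc() initializes when backlight support is enabled; mutex_destroy() is a no-op in production builds but poisons the lock under CONFIG_DEBUG_MUTEXES so a use after release gets flagged. The pairing, shown side by side (a sketch; the two calls live in different functions):

	mutex_init(&info->bl_curve_mutex);	/* framebuffer_alloc()   */
	mutex_destroy(&info->bl_curve_mutex);	/* framebuffer_release() */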
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index d45355b9a58c..8f041f9b14c7 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1134,7 +1134,7 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx)
info->fb_size = int_cfb_info->fb.fix.smem_len;
info->info = int_cfb_info;
- strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
+ strscpy(info->dev_name, int_cfb_info->fb.fix.id,
sizeof(info->dev_name));
}
@@ -1229,7 +1229,7 @@ static int cyber2000fb_ddc_getsda(void *data)
static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
- strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
cfb->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1304,7 +1304,7 @@ static int cyber2000fb_i2c_getscl(void *data)
static int cyber2000fb_i2c_register(struct cfb_info *cfb)
{
- strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
cfb->i2c_adapter.owner = THIS_MODULE;
cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
@@ -1500,7 +1500,7 @@ static int cyber2000fb_setup(char *options)
if (strncmp(opt, "font:", 5) == 0) {
static char default_font_storage[40];
- strlcpy(default_font_storage, opt + 5,
+ strscpy(default_font_storage, opt + 5,
sizeof(default_font_storage));
default_font = default_font_storage;
continue;
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 3688f9165848..18405c402ec1 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -280,7 +280,7 @@ static struct platform_device dnfb_device = {
.name = "dnfb",
};
-int __init dnfb_init(void)
+static int __init dnfb_init(void)
{
int ret;
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index b3d580e57221..7cba3969a970 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -883,7 +883,7 @@ static void ffb_init_fix(struct fb_info *info)
} else
ffb_type_name = "Elite 3D";
- strlcpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index 3b727d528fde..942e382cf1cf 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -293,7 +293,7 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
return 0;
}
-int __init fm2fb_setup(char *options)
+static int __init fm2fb_setup(char *options)
{
char *this_opt;
@@ -309,7 +309,7 @@ int __init fm2fb_setup(char *options)
return 0;
}
-int __init fm2fb_init(void)
+static int __init fm2fb_init(void)
{
char *option = NULL;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 5d34d89fb665..e41204ecb0e3 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -410,13 +410,13 @@ static void __init gx1fb_setup(char *options)
continue;
if (!strncmp(this_opt, "mode:", 5))
- strlcpy(mode_option, this_opt + 5, sizeof(mode_option));
+ strscpy(mode_option, this_opt + 5, sizeof(mode_option));
else if (!strncmp(this_opt, "crt:", 4))
crt_option = !!simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "panel:", 6))
- strlcpy(panel_option, this_opt + 6, sizeof(panel_option));
+ strscpy(panel_option, this_opt + 6, sizeof(panel_option));
else
- strlcpy(mode_option, this_opt, sizeof(mode_option));
+ strscpy(mode_option, this_opt, sizeof(mode_option));
}
}
#endif
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index e5475ae1e158..94588b809ebf 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -650,7 +650,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
info->fix = gxt4500_fix;
- strlcpy(info->fix.id, cardinfo[cardtype].cardname,
+ strscpy(info->fix.id, cardinfo[cardtype].cardname,
sizeof(info->fix.id));
info->pseudo_palette = par->pseudo_palette;
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 8d418abdd767..cdd44e5deafe 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -375,7 +375,7 @@ static struct dio_driver hpfb_driver = {
.remove = hpfb_remove_one,
};
-int __init hpfb_init(void)
+static int __init hpfb_init(void)
{
unsigned int sid;
unsigned char i;
@@ -415,7 +415,7 @@ int __init hpfb_init(void)
return 0;
}
-void __exit hpfb_cleanup_module(void)
+static void __exit hpfb_cleanup_module(void)
{
dio_unregister_driver(&hpfb_driver);
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 886c564787f1..b58b445bb529 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -74,10 +74,6 @@
#define SYNTHVID_DEPTH_WIN8 32
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
enum pipe_msg_type {
PIPE_MSG_INVALID,
PIPE_MSG_DATA,
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 09dd85553d4f..bd30d8314b68 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -159,7 +159,7 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -400,7 +400,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
- u32 bpp, base, dacspeed24, mem;
+ u32 bpp, base, dacspeed24, mem, freq;
u8 r7;
int i;
@@ -643,7 +643,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
+ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
+ if (freq < I740_RFREQ_FIX) {
+ fb_dbg(info, "invalid pixclock\n");
+ freq = I740_RFREQ_FIX;
+ }
+ i740_calc_vclk(freq, par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
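var->pixclock is a period in picoseconds, so the dot clock in Hz is 10^12 / pixclock; the driver computes it as (10^9 / pixclock) * 10^3 to keep the intermediate within 32 bits. A huge pixclock makes the first division collapse to 0, and a zero frequency can divide by zero inside i740_calc_vclk(), hence the clamp to I740_RFREQ_FIX. Conversion plus guard as one helper (a sketch; the caller must already have rejected pixclock == 0):

static u32 pixclock_ps_to_hz(u32 pixclock_ps, u32 min_hz)
{
	/* 10^12 / ps, split as (10^9 / ps) * 10^3 to stay in 32 bits */
	u32 hz = (1000000000u / pixclock_ps) * 1000u;

	return hz < min_hz ? min_hz : hz;
}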
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index a2f644c97f28..94f3bc637fc8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -41,7 +41,18 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#include <linux/platform_data/video-imxfb.h>
+#define PCR_TFT (1 << 31)
+#define PCR_BPIX_8 (3 << 25)
+#define PCR_BPIX_12 (4 << 25)
+#define PCR_BPIX_16 (5 << 25)
+#define PCR_BPIX_18 (6 << 25)
+
+struct imx_fb_videomode {
+ struct fb_videomode mode;
+ u32 pcr;
+ bool aus_mode;
+ unsigned char bpp;
+};
/*
* Complain if VAR is out of range.
@@ -656,7 +667,6 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
@@ -671,7 +681,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
fbi->devtype = pdev->id_entry->driver_data;
- strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
@@ -690,25 +700,20 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
info->fbops = &imxfb_ops;
info->flags = FBINFO_FLAG_DEFAULT |
FBINFO_READS_FAST;
- if (pdata) {
- fbi->lscr1 = pdata->lscr1;
- fbi->dmacr = pdata->dmacr;
- fbi->pwmr = pdata->pwmr;
- } else {
- np = pdev->dev.of_node;
- info->var.grayscale = of_property_read_bool(np,
- "cmap-greyscale");
- fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
- fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
+ np = pdev->dev.of_node;
+ info->var.grayscale = of_property_read_bool(np,
+ "cmap-greyscale");
+ fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
+ fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
+ fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
- of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+ of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
- of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
- }
+ of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+
+ of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
return 0;
}
@@ -863,10 +868,10 @@ static int imxfb_probe(struct platform_device *pdev)
struct imxfb_info *fbi;
struct lcd_device *lcd;
struct fb_info *info;
- struct imx_fb_platform_data *pdata;
struct resource *res;
struct imx_fb_videomode *m;
const struct of_device_id *of_id;
+ struct device_node *display_np;
int ret, i;
int bytes_per_pixel;
@@ -884,8 +889,6 @@ static int imxfb_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- pdata = dev_get_platdata(&pdev->dev);
-
info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
if (!info)
return -ENOMEM;
@@ -898,43 +901,34 @@ static int imxfb_probe(struct platform_device *pdev)
if (ret < 0)
goto failed_init;
- if (pdata) {
- if (!fb_mode)
- fb_mode = pdata->mode[0].mode.name;
-
- fbi->mode = pdata->mode;
- fbi->num_modes = pdata->num_modes;
- } else {
- struct device_node *display_np;
- fb_mode = NULL;
-
- display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
- if (!display_np) {
- dev_err(&pdev->dev, "No display defined in devicetree\n");
- ret = -EINVAL;
- goto failed_of_parse;
- }
+ fb_mode = NULL;
- /*
- * imxfb does not support more modes, we choose only the native
- * mode.
- */
- fbi->num_modes = 1;
-
- fbi->mode = devm_kzalloc(&pdev->dev,
- sizeof(struct imx_fb_videomode), GFP_KERNEL);
- if (!fbi->mode) {
- ret = -ENOMEM;
- of_node_put(display_np);
- goto failed_of_parse;
- }
+ display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
+ if (!display_np) {
+ dev_err(&pdev->dev, "No display defined in devicetree\n");
+ ret = -EINVAL;
+ goto failed_of_parse;
+ }
- ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ /*
+ * imxfb does not support more modes, we choose only the native
+ * mode.
+ */
+ fbi->num_modes = 1;
+
+ fbi->mode = devm_kzalloc(&pdev->dev,
+ sizeof(struct imx_fb_videomode), GFP_KERNEL);
+ if (!fbi->mode) {
+ ret = -ENOMEM;
of_node_put(display_np);
- if (ret)
- goto failed_of_parse;
+ goto failed_of_parse;
}
+ ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ of_node_put(display_np);
+ if (ret)
+ goto failed_of_parse;
+
/* Calculate maximum bytes used per pixel. In most cases this should
* be the same as m->bpp/8 */
m = &fbi->mode[0];
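With the platform-data path gone, probe always takes the devicetree route: of_parse_phandle() returns the "display" node with its refcount raised, and every exit after that point, error or success, must drop that reference, which is why of_node_put(display_np) now appears on both paths of the hunk. The reference discipline in isolation (a sketch):

static int read_display_sketch(struct platform_device *pdev)
{
	struct device_node *display_np;
	int ret = 0;

	display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
	if (!display_np)
		return -EINVAL;		/* no reference taken, nothing to put */

	/* ...read display_np properties here, setting ret on error... */

	of_node_put(display_np);	/* drop the reference on every path */
	return ret;
}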
@@ -943,13 +937,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_len = max_t(size_t, info->fix.smem_len,
m->mode.xres * m->mode.yres * bytes_per_pixel);
- res = request_mem_region(res->start, resource_size(res),
- DRIVER_NAME);
- if (!res) {
- ret = -EBUSY;
- goto failed_req;
- }
-
fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fbi->clk_ipg)) {
ret = PTR_ERR(fbi->clk_ipg);
@@ -983,10 +970,10 @@ static int imxfb_probe(struct platform_device *pdev)
goto failed_getclock;
}
- fbi->regs = ioremap(res->start, resource_size(res));
- if (fbi->regs == NULL) {
+ fbi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->regs)) {
dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi->regs);
goto failed_ioremap;
}
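devm_ioremap_resource() rolls request_mem_region(), ioremap() and failure logging into one device-managed call that is torn down automatically, which is what lets the surrounding hunks delete the explicit request_mem_region() and the iounmap() calls in both the probe error path and imxfb_remove(). (Since the helper logs its own error, the dev_err() kept here is arguably redundant.) Minimal shape:

	fbi->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fbi->regs))
		return PTR_ERR(fbi->regs);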
@@ -1001,13 +988,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_start = fbi->map_dma;
- if (pdata && pdata->init) {
- ret = pdata->init(fbi->pdev);
- if (ret)
- goto failed_platform_init;
- }
-
-
INIT_LIST_HEAD(&info->modelist);
for (i = 0; i < fbi->num_modes; i++)
fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
@@ -1059,17 +1039,12 @@ failed_lcd:
failed_register:
fb_dealloc_cmap(&info->cmap);
failed_cmap:
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
-failed_platform_init:
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
failed_map:
- iounmap(fbi->regs);
failed_ioremap:
failed_getclock:
release_mem_region(res->start, resource_size(res));
-failed_req:
failed_of_parse:
kfree(info->pseudo_palette);
failed_init:
@@ -1079,26 +1054,15 @@ failed_init:
static int imxfb_remove(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata;
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
- iounmap(fbi->regs);
- release_mem_region(res->start, resource_size(res));
kfree(info->pseudo_palette);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 236521b19daf..68bba2688f4c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2383,9 +2383,9 @@ static int __init matroxfb_setup(char *options) {
else if (!strncmp(this_opt, "mem:", 4))
mem = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "mode:", 5))
- strlcpy(videomode, this_opt+5, sizeof(videomode));
+ strscpy(videomode, this_opt + 5, sizeof(videomode));
else if (!strncmp(this_opt, "outputs:", 8))
- strlcpy(outputs, this_opt+8, sizeof(outputs));
+ strscpy(outputs, this_opt + 8, sizeof(outputs));
else if (!strncmp(this_opt, "dfp:", 4)) {
dfp_type = simple_strtoul(this_opt+4, NULL, 0);
dfp = 1;
@@ -2455,7 +2455,7 @@ static int __init matroxfb_setup(char *options) {
else if (!strcmp(this_opt, "dfp"))
dfp = value;
else {
- strlcpy(videomode, this_opt, sizeof(videomode));
+ strscpy(videomode, this_opt, sizeof(videomode));
}
}
}
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index b1acb1ebebe9..91001990e351 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#ifdef CONFIG_PPC32
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index 9d9fe5c3a7a1..161fc65d6b57 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -489,7 +489,7 @@ static void hwa742_update_window_auto(struct timer_list *unused)
__hwa742_update_window_auto(false);
}
-int hwa742_update_window_async(struct fb_info *fbi,
+static int hwa742_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*complete_callback)(void *arg),
void *complete_callback_data)
@@ -522,7 +522,6 @@ int hwa742_update_window_async(struct fb_info *fbi,
out:
return r;
}
-EXPORT_SYMBOL(hwa742_update_window_async);
static int hwa742_setup_plane(int plane, int channel_out,
unsigned long offset, int screen_width,
diff --git a/drivers/video/fbdev/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h
index beb841ccb99c..ab1cb6e7f5f8 100644
--- a/drivers/video/fbdev/omap/omapfb.h
+++ b/drivers/video/fbdev/omap/omapfb.h
@@ -227,13 +227,4 @@ extern int omapfb_register_client(struct omapfb_notifier_block *nb,
omapfb_notifier_callback_t callback,
void *callback_data);
extern int omapfb_unregister_client(struct omapfb_notifier_block *nb);
-extern int omapfb_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-extern int hwa742_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-
#endif /* __OMAPFB_H */
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 292fcb0a24fc..17cda5765683 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -668,7 +668,7 @@ static int omapfb_set_par(struct fb_info *fbi)
return r;
}
-int omapfb_update_window_async(struct fb_info *fbi,
+static int omapfb_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*callback)(void *),
void *callback_data)
@@ -714,7 +714,6 @@ int omapfb_update_window_async(struct fb_info *fbi,
return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
}
-EXPORT_SYMBOL(omapfb_update_window_async);
static int omapfb_update_win(struct fb_info *fbi,
struct omapfb_update_window *win)
@@ -1643,15 +1642,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
fbdev->int_irq = platform_get_irq(pdev, 0);
- if (!fbdev->int_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->int_irq < 0) {
r = ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
- if (!fbdev->ext_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->ext_irq < 0) {
r = ENXIO;
goto cleanup;
}
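platform_get_irq() returns a negative errno on failure and logs the problem itself, which is why the dev_err() lines can go; the old `if (!fbdev->int_irq)` test treated every error value as a usable IRQ, since the function never returns 0 on current kernels. The recommended pattern:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate the core's errno */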
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index afa688e754b9..5ccddcfce722 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1331,7 +1331,7 @@ static void clear_fb_info(struct fb_info *fbi)
{
memset(&fbi->var, 0, sizeof(fbi->var));
memset(&fbi->fix, 0, sizeof(fbi->fix));
- strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+ strscpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
}
static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index d3be2c64f1c0..8fd79deb1e2a 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -617,6 +617,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
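PICOS2KHZ(x) from <linux/fb.h> expands to 1000000000UL / (x), so a zero var->pixclock arriving through the FBIOPUT_VSCREENINFO ioctl would divide by zero on the very next line; rejecting it up front is the whole fix. The guard belongs in any fbdev check_var that converts pixclock:

	if (!var->pixclock)
		return -EINVAL;	/* PICOS2KHZ()/KHZ2PICOS() divide by it */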
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e943300d23e8..d5d0bbd39213 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -640,7 +640,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
info->node = -1;
- strlcpy(info->fix.id, mi->id, 16);
+ strscpy(info->fix.id, mi->id, 16);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
info->fix.xpanstep = 0;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 66cfc3e9d3cf..696ac5431180 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2042,7 +2042,7 @@ static int __init pxafb_setup_options(void)
return -ENODEV;
if (options)
- strlcpy(g_options, options, sizeof(g_options));
+ strscpy(g_options, options, sizeof(g_options));
return 0;
}
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 079a2a7fb2c5..964bc88bb89c 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -133,7 +133,7 @@ static struct platform_device q40fb_device = {
.name = "q40fb",
};
-int __init q40fb_init(void)
+static int __init q40fb_init(void)
{
int ret = 0;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index b93c8eb02336..67b63a753cb2 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -248,7 +248,7 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
{
struct s3fb_info *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -905,6 +905,8 @@ static int s3fb_set_par(struct fb_info *info)
value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
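screen_size is computed from the mode being programmed, while info->screen_size is how much was actually ioremapped; when the requested layout implies more memory than the mapping, memset_io() would run past its end. Clamping the wipe to the mapping bounds it, and the same two lines are applied to vt8623fb later in this series. Equivalent form with the min helper (a sketch; the operands differ in type, hence min_t):

	memset_io(info->screen_base, 0x00,
		  min_t(unsigned long, screen_size, info->screen_size));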
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index e31cf63b0a62..017c8efe8267 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1224,47 +1224,6 @@ int __init sa1100fb_init(void)
return platform_driver_register(&sa1100fb_driver);
}
-int __init sa1100fb_setup(char *options)
-{
-#if 0
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
-
- if (!strncmp(this_opt, "bpp:", 4))
- current_par.max_bpp =
- simple_strtoul(this_opt + 4, NULL, 0);
-
- if (!strncmp(this_opt, "lccr0:", 6))
- lcd_shadow.lccr0 =
- simple_strtoul(this_opt + 6, NULL, 0);
- if (!strncmp(this_opt, "lccr1:", 6)) {
- lcd_shadow.lccr1 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_xres =
- (lcd_shadow.lccr1 & 0x3ff) + 16;
- }
- if (!strncmp(this_opt, "lccr2:", 6)) {
- lcd_shadow.lccr2 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_yres =
- (lcd_shadow.
- lccr0 & LCCR0_SDS) ? ((lcd_shadow.
- lccr2 & 0x3ff) +
- 1) *
- 2 : ((lcd_shadow.lccr2 & 0x3ff) + 1);
- }
- if (!strncmp(this_opt, "lccr3:", 6))
- lcd_shadow.lccr3 =
- simple_strtoul(this_opt + 6, NULL, 0);
- }
-#endif
- return 0;
-}
-
module_init(sa1100fb_init);
MODULE_DESCRIPTION("StrongARM-1100/1110 framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index cf2a90ecd64e..e770b4a356b5 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -355,7 +355,7 @@ static int simplefb_regulators_get(struct simplefb_par *par,
if (!p || p == prop->name)
continue;
- strlcpy(name, prop->name,
+ strscpy(name, prop->name,
strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1);
regulator = devm_regulator_get_optional(&pdev->dev, name);
if (IS_ERR(regulator)) {
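The size argument here makes strscpy() copy only the prefix of the property name, stripping its "-supply" suffix: for "vcc-supply", strlen("vcc-supply") - strlen(SUPPLY_SUFFIX) + 1 = 10 - 7 + 1 = 4, leaving room for "vcc" plus the terminating NUL. Worked out on its own:

	#define SUPPLY_SUFFIX "-supply"
	char name[32];

	/* 10 - 7 + 1 = 4 -> copies "vcc" and NUL-terminates */
	strscpy(name, "vcc-supply",
		strlen("vcc-supply") - strlen(SUPPLY_SUFFIX) + 1);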
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index b568c646a76c..2ba91d62af92 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -355,12 +355,12 @@ SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
}
break;
case 400:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) {
if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth];
}
break;
case 512:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) {
if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth];
}
break;
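Both branches used to compare LCDwidth twice, a copy-paste slip that made the height requirement a no-op: an 800x480 panel, for example, passed (LCDwidth >= 800) && (LCDwidth >= 600) and wrongly unlocked the doubled 400x300 mode that needs an 800x600 panel. The intended test, factored into a predicate (hypothetical helper, for illustration only):

static bool panel_covers(int panel_w, int panel_h, int need_w, int need_h)
{
	/* both axes must fit; comparing one axis twice was the bug */
	return panel_w >= need_w && panel_h >= need_h;
}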
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index f28fd69d5eb7..c9e77429dfa3 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -649,37 +649,37 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
u16 xres=0, yres, myres;
#ifdef CONFIG_FB_SIS_300
- if(ivideo->sisvga_engine == SIS_300_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS300))
+ if (ivideo->sisvga_engine == SIS_300_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS300))
return -1 ;
}
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS315))
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS315))
return -1;
}
#endif
myres = sisbios_mode[myindex].yres;
- switch(vbflags & VB_DISPTYPE_DISP2) {
+ switch (vbflags & VB_DISPTYPE_DISP2) {
case CRT2_LCD:
xres = ivideo->lcdxres; yres = ivideo->lcdyres;
- if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
- (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
- if(sisbios_mode[myindex].xres > xres)
+ if ((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
+ (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
+ if (sisbios_mode[myindex].xres > xres)
return -1;
- if(myres > yres)
+ if (myres > yres)
return -1;
}
- if(ivideo->sisfb_fstn) {
- if(sisbios_mode[myindex].xres == 320) {
- if(myres == 240) {
- switch(sisbios_mode[myindex].mode_no[1]) {
+ if (ivideo->sisfb_fstn) {
+ if (sisbios_mode[myindex].xres == 320) {
+ if (myres == 240) {
+ switch (sisbios_mode[myindex].mode_no[1]) {
case 0x50: myindex = MODE_FSTN_8; break;
case 0x56: myindex = MODE_FSTN_16; break;
case 0x53: return -1;
@@ -688,7 +688,7 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
}
}
- if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
return -1;
@@ -696,14 +696,14 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
break;
case CRT2_TV:
- if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
break;
case CRT2_VGA:
- if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
@@ -1872,7 +1872,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
+ strscpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
@@ -2204,82 +2204,88 @@ static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
- bool mustwait = false;
- u8 sr1F, cr17;
+ bool mustwait = false;
+ u8 sr1F, cr17;
#ifdef CONFIG_FB_SIS_315
- u8 cr63=0;
+ u8 cr63 = 0;
#endif
- u16 temp = 0xffff;
- int i;
+ u16 temp = 0xffff;
+ int i;
+
+ sr1F = SiS_GetReg(SISSR, 0x1F);
+ SiS_SetRegOR(SISSR, 0x1F, 0x04);
+ SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- sr1F = SiS_GetReg(SISSR, 0x1F);
- SiS_SetRegOR(SISSR, 0x1F, 0x04);
- SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- if(sr1F & 0xc0) mustwait = true;
+ if (sr1F & 0xc0)
+ mustwait = true;
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
- cr63 &= 0x40;
- SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
+ cr63 &= 0x40;
+ SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
+ }
#endif
- cr17 = SiS_GetReg(SISCR, 0x17);
- cr17 &= 0x80;
- if(!cr17) {
- SiS_SetRegOR(SISCR, 0x17, 0x80);
- mustwait = true;
- SiS_SetReg(SISSR, 0x00, 0x01);
- SiS_SetReg(SISSR, 0x00, 0x03);
- }
+ cr17 = SiS_GetReg(SISCR, 0x17);
+ cr17 &= 0x80;
- if(mustwait) {
- for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
- }
+ if (!cr17) {
+ SiS_SetRegOR(SISCR, 0x17, 0x80);
+ mustwait = true;
+ SiS_SetReg(SISSR, 0x00, 0x01);
+ SiS_SetReg(SISSR, 0x00, 0x03);
+ }
+ if (mustwait) {
+ for (i = 0; i < 10; i++)
+ sisfbwaitretracecrt1(ivideo);
+ }
#ifdef CONFIG_FB_SIS_315
- if(ivideo->chip >= SIS_330) {
- SiS_SetRegAND(SISCR, 0x32, ~0x20);
- if(ivideo->chip >= SIS_340) {
- SiS_SetReg(SISCR, 0x57, 0x4a);
- } else {
- SiS_SetReg(SISCR, 0x57, 0x5f);
- }
- SiS_SetRegOR(SISCR, 0x53, 0x02);
- while ((SiS_GetRegByte(SISINPSTAT)) & 0x01) break;
- while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
- if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
- SiS_SetRegAND(SISCR, 0x53, 0xfd);
- SiS_SetRegAND(SISCR, 0x57, 0x00);
- }
+ if (ivideo->chip >= SIS_330) {
+ SiS_SetRegAND(SISCR, 0x32, ~0x20);
+ if (ivideo->chip >= SIS_340)
+ SiS_SetReg(SISCR, 0x57, 0x4a);
+ else
+ SiS_SetReg(SISCR, 0x57, 0x5f);
+
+ SiS_SetRegOR(SISCR, 0x53, 0x02);
+ while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)
+ break;
+ while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01))
+ break;
+ if ((SiS_GetRegByte(SISMISCW)) & 0x10)
+ temp = 1;
+
+ SiS_SetRegAND(SISCR, 0x53, 0xfd);
+ SiS_SetRegAND(SISCR, 0x57, 0x00);
+ }
#endif
- if(temp == 0xffff) {
- i = 3;
- do {
- temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
- ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
- } while(((temp == 0) || (temp == 0xffff)) && i--);
+ if (temp == 0xffff) {
+ i = 3;
- if((temp == 0) || (temp == 0xffff)) {
- if(sisfb_test_DDC1(ivideo)) temp = 1;
- }
- }
+ do {
+ temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
+ ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
+ } while (((temp == 0) || (temp == 0xffff)) && i--);
- if((temp) && (temp != 0xffff)) {
- SiS_SetRegOR(SISCR, 0x32, 0x20);
- }
+ if ((temp == 0) || (temp == 0xffff)) {
+ if (sisfb_test_DDC1(ivideo))
+ temp = 1;
+ }
+ }
+
+ if ((temp) && (temp != 0xffff))
+ SiS_SetRegOR(SISCR, 0x32, 0x20);
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA)
+ SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
#endif
- SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
-
- SiS_SetReg(SISSR, 0x1F, sr1F);
+ SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
+ SiS_SetReg(SISSR, 0x1F, sr1F);
}
/* Determine and detect attached devices on SiS30x */
@@ -2293,25 +2299,25 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
ivideo->SiS_Pr.PanelSelfDetected = false;
/* LCD detection only for TMDS bridges */
- if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
+ if (!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
return;
- if(ivideo->vbflags2 & VB2_30xBDH)
+ if (ivideo->vbflags2 & VB2_30xBDH)
return;
/* If LCD already set up by BIOS, skip it */
reg = SiS_GetReg(SISCR, 0x32);
- if(reg & 0x08)
+ if (reg & 0x08)
return;
realcrtno = 1;
- if(ivideo->SiS_Pr.DDCPortMixup)
+ if (ivideo->SiS_Pr.DDCPortMixup)
realcrtno = 0;
/* Check DDC capabilities */
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
realcrtno, 0, &buffer[0], ivideo->vbflags2);
- if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
+ if ((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
return;
/* Read DDC data */
@@ -2320,17 +2326,17 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
ivideo->sisvga_engine, realcrtno, 1,
&buffer[0], ivideo->vbflags2);
- } while((temp) && i--);
+ } while ((temp) && i--);
- if(temp)
+ if (temp)
return;
/* No digital device */
- if(!(buffer[0x14] & 0x80))
+ if (!(buffer[0x14] & 0x80))
return;
/* First detailed timing preferred timing? */
- if(!(buffer[0x18] & 0x02))
+ if (!(buffer[0x18] & 0x02))
return;
xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
@@ -2338,26 +2344,26 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
switch(xres) {
case 1024:
- if(yres == 768)
+ if (yres == 768)
paneltype = 0x02;
break;
case 1280:
- if(yres == 1024)
+ if (yres == 1024)
paneltype = 0x03;
break;
case 1600:
- if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
+ if ((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
paneltype = 0x0b;
break;
}
- if(!paneltype)
+ if (!paneltype)
return;
- if(buffer[0x23])
+ if (buffer[0x23])
cr37 |= 0x10;
- if((buffer[0x47] & 0x18) == 0x18)
+ if ((buffer[0x47] & 0x18) == 0x18)
cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
else
cr37 |= 0xc0;
@@ -2372,31 +2378,34 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
- int temp, mytest, result, i, j;
-
- for(j = 0; j < 10; j++) {
- result = 0;
- for(i = 0; i < 3; i++) {
- mytest = test;
- SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
- temp = (type >> 8) | (mytest & 0x00ff);
- SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
- mytest >>= 8;
- mytest &= 0x7f;
- temp = SiS_GetReg(SISPART4, 0x03);
- temp ^= 0x0e;
- temp &= mytest;
- if(temp == mytest) result++;
+ int temp, mytest, result, i, j;
+
+ for (j = 0; j < 10; j++) {
+ result = 0;
+ for (i = 0; i < 3; i++) {
+ mytest = test;
+ SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
+ temp = (type >> 8) | (mytest & 0x00ff);
+ SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
+ mytest >>= 8;
+ mytest &= 0x7f;
+ temp = SiS_GetReg(SISPART4, 0x03);
+ temp ^= 0x0e;
+ temp &= mytest;
+ if (temp == mytest)
+ result++;
#if 1
- SiS_SetReg(SISPART4, 0x11, 0x00);
- SiS_SetRegAND(SISPART4, 0x10, 0xe0);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
+ SiS_SetReg(SISPART4, 0x11, 0x00);
+ SiS_SetRegAND(SISPART4, 0x10, 0xe0);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
#endif
- }
- if((result == 0) || (result >= 2)) break;
- }
- return result;
+ }
+
+ if ((result == 0) || (result >= 2))
+ break;
+ }
+ return result;
}
static void SiS_Sense30x(struct sis_video_info *ivideo)
@@ -4262,18 +4271,17 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
-
+ for (k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
- if(RankCapacity != PseudoRankCapacity)
+ if (RankCapacity != PseudoRankCapacity)
continue;
- if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
+ if ((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
continue;
BankNumHigh = RankCapacity * 16 * iteration - 1;
- if(iteration == 3) { /* Rank No */
+ if (iteration == 3) { /* Rank No */
BankNumMid = RankCapacity * 16 - 1;
} else {
BankNumMid = RankCapacity * 16 * iteration / 2 - 1;
@@ -4287,18 +4295,22 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
SiS_SetRegOR(SISSR, 0x15, 0x04); /* Test */
sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
- if(buswidth == 4) sr14 |= 0x80;
- else if(buswidth == 2) sr14 |= 0x40;
+
+ if (buswidth == 4)
+ sr14 |= 0x80;
+ else if (buswidth == 2)
+ sr14 |= 0x40;
+
SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
SiS_SetReg(SISSR, 0x14, sr14);
BankNumHigh <<= 16;
BankNumMid <<= 16;
- if((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
- (BankNumMid + PhysicalAdrHigh >= mapsize) ||
- (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
- (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
+ if ((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
+ (BankNumMid + PhysicalAdrHigh >= mapsize) ||
+ (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
+ (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
continue;
/* Write data */
@@ -4312,7 +4324,7 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
(FBAddr + BankNumHigh + PhysicalAdrOtherPage));
/* Read data */
- if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
+ if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
return 1;
}
@@ -5867,7 +5879,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->cardnumber++;
}
- strlcpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
+ strscpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
ivideo->warncount = 0;
ivideo->chip_id = pdev->device;
@@ -6150,24 +6162,20 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
int result = 1;
- /* if((ivideo->chip == SIS_315H) ||
- (ivideo->chip == SIS_315) ||
- (ivideo->chip == SIS_315PRO) ||
- (ivideo->chip == SIS_330)) {
- sisfb_post_sis315330(pdev);
- } else */ if(ivideo->chip == XGI_20) {
+
+ if (ivideo->chip == XGI_20) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
- } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
+ } else if ((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
} else {
printk(KERN_INFO "sisfb: Card is not "
"POSTed and sisfb can't do this either.\n");
}
- if(!result) {
+ if (!result) {
printk(KERN_ERR "sisfb: Failed to POST card\n");
ret = -ENODEV;
goto error_3;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index d119b1d08007..8ab9a3fbd281 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -131,8 +131,6 @@ static struct fb_info info;
*/
static struct xxx_par __initdata current_par;
-int xxxfb_init(void);
-
/**
* xxxfb_open - Optional function. Called when the framebuffer is
* first accessed.
@@ -886,7 +884,7 @@ static struct pci_driver xxxfb_driver = {
MODULE_DEVICE_TABLE(pci, xxxfb_id_table);
-int __init xxxfb_init(void)
+static int __init xxxfb_init(void)
{
/*
* For kernel boot options (in 'video=xxxfb:<options>' format)
@@ -967,7 +965,7 @@ static struct platform_device *xxxfb_device;
* Only necessary if your driver takes special options,
* otherwise we fall back on the generic fb_setup().
*/
-int __init xxxfb_setup(char *options)
+static int __init xxxfb_setup(char *options)
{
/* Parse user specified options (`video=xxxfb:') */
}
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 6a52eba64559..fce6cfbadfd6 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1719,7 +1719,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
enable = 0;
}
- strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
+ strscpy(fb->fix.id, fbname, sizeof(fb->fix.id));
memcpy(&par->ops,
(head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 5c765655d000..52e4ed9da78c 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -450,7 +450,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- /* Set Set Area Color Mode ON/OFF & Low Power Display Mode */
+ /* Set Area Color Mode ON/OFF & Low Power Display Mode */
if (par->area_color_enable || par->low_power) {
u32 mode;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 27d4b0ace2d6..cd4d640f9477 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1382,7 +1382,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
sst_get_memsize(info, &fix->smem_len);
- strlcpy(fix->id, spec->name, sizeof(fix->id));
+ strscpy(fix->id, spec->name, sizeof(fix->id));
printk(KERN_INFO "%s (revision %d) with %s dac\n",
fix->id, par->revision, par->dac_sw.name);
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 15b079505a00..490bd9a14763 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -80,7 +80,7 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
info->pseudo_palette = gp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
+ strscpy(info->fix.id, "gfb", sizeof(info->fix.id));
info->fix.smem_start = gp->fb_base_phys;
info->fix.smem_len = gp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 1d3bacd9d5ac..1279b02234f8 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -84,7 +84,7 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
info->pseudo_palette = sp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "s3d", sizeof(info->fix.id));
info->fix.smem_start = sp->fb_base_phys;
info->fix.smem_len = sp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 9daf17b11106..f7b463633ba0 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -207,7 +207,7 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
info->pseudo_palette = ep->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "e3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "e3d", sizeof(info->fix.id));
info->fix.smem_start = ep->fb_base_phys;
info->fix.smem_len = ep->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 1638a40fed22..01d87f53324d 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -333,7 +333,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
else
tcx_name = "TCX24";
- strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tcx_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 67e37a62b07c..8a8122f8bfeb 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1264,7 +1264,7 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = I2C_CLASS_DDC;
chan->adapter.algo_data = &chan->algo;
@@ -1293,7 +1293,7 @@ static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ae0cf5540636..1fff5fd7ab51 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1344,7 +1344,7 @@ tgafb_init_fix(struct fb_info *info)
memory_size = 16777216;
}
- strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 319131bd72cf..cda095420ee8 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -270,7 +270,7 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
{
struct tridentfb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index a6c9d4f26669..1007023a5e88 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -90,11 +90,7 @@ struct fb_info_valkyrie {
u32 pseudo_palette[16];
};
-/*
- * Exported functions
- */
-int valkyriefb_init(void);
-int valkyriefb_setup(char*);
+static int valkyriefb_setup(char*);
static int valkyriefb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
@@ -302,7 +298,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
default_vmode, default_cmode);
}
-int __init valkyriefb_init(void)
+static int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
unsigned long frame_buffer_phys, cmap_regs_phys;
@@ -549,7 +545,7 @@ static int __init valkyrie_init_info(struct fb_info *info,
/*
* Parse user specified options (`video=valkyriefb:')
*/
-int __init valkyriefb_setup(char *options)
+static int __init valkyriefb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index a92a8c670cf0..4274c6efb249 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -507,6 +507,8 @@ static int vt8623fb_set_par(struct fb_info *info)
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index ce91add81401..dc4d25c26256 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -17,7 +17,7 @@ config NITRO_ENCLAVES
config NITRO_ENCLAVES_MISC_DEV_TEST
bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
- depends on NITRO_ENCLAVES && KUNIT
+ depends on NITRO_ENCLAVES && KUNIT=y
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the misc device functionality of the Nitro
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 56c77f63cd22..0a53a61231c2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,11 +35,12 @@ if VIRTIO_MENU
config VIRTIO_HARDEN_NOTIFICATION
bool "Harden virtio notification"
+ depends on BROKEN
help
Enable this to harden the device notifications and suppress
those that happen at a time where notifications are illegal.
- Experimental: Note that several drivers still have bugs that
+ Experimental: Note that several drivers still have issues that
may cause crashes or hangs when correct handling of
notifications is enforced; depending on the subset of
drivers and devices you use, this may or may not work.
@@ -126,9 +127,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver was only tested under x86-64 and arm64, but should
- theoretically work on all architectures that support memory hotplug
- and hotremove.
+ This driver currently only supports x86-64 and arm64. Although it
+ should compile on other architectures that implement memory
+ hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to work as
+ expected.
If unsure, say M.
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 14c142d77fba..828ced060742 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -428,7 +428,9 @@ int register_virtio_device(struct virtio_device *dev)
goto out;
dev->index = err;
- dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = dev_set_name(&dev->dev, "virtio%u", dev->index);
+ if (err)
+ goto out_ida_remove;
err = virtio_device_of_init(dev);
if (err)
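dev_set_name() bottoms out in kvasprintf() and can fail with -ENOMEM; previously the failure was ignored and a nameless device would be registered. Because an index was already taken from virtio_index_ida a few lines earlier, the error path has to hand it back, which the pre-existing out_ida_remove label does. The shape of the fix (a sketch around the hunk):

	dev->index = err;		/* err holds the ida-allocated index */

	err = dev_set_name(&dev->dev, "virtio%u", dev->index);
	if (err)
		goto out_ida_remove;	/* return the index on failure */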
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bd360b91e9d3..3f78a3a1eb75 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -856,7 +856,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
vb->shrinker.count_objects = virtio_balloon_shrinker_count;
vb->shrinker.seeks = DEFAULT_SEEKS;
- return register_shrinker(&vb->shrinker);
+ return register_shrinker(&vb->shrinker, "virtio-balloon");
}
static int virtballoon_probe(struct virtio_device *vdev)
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index e07486f01999..0c2892ec6817 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -862,8 +862,7 @@ static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
unsigned long mb_id,
unsigned long start_pfn)
{
- const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
int new_state;
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
@@ -1158,8 +1157,7 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
*/
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
- const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
/*
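is_zone_movable_page() is a small helper in the core mm headers, introduced alongside these call sites, that names the previously open-coded zone test; its definition is essentially:

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}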
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 083ff1eb743d..3ff746e3f24a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -403,6 +403,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
+ vq->num_max = num;
+
/* Activate the queue */
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
@@ -487,6 +489,9 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
if (err)
return err;
+ if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
+ enable_irq_wake(irq);
+
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ca51fcc9daab..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+ /*
+ * If it fails during re-enable reset vq. This way we won't rejoin
+ * info->node to the queue. Prevent unexpected irqs.
+ */
+ if (!vq->reset) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ }
vp_dev->del_vq(info);
kfree(info);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5e5721145c7..2257f1b3d8ae 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -135,6 +135,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
+ vq->num_max = num;
+
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 623906b4996c..c3b9f2761849 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+ if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+ __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
}
/* virtio config->finalize_features() implementation */
@@ -176,6 +179,110 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
+static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ unsigned long index;
+
+ index = vq->index;
+
+ /* activate the queue */
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
+
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+ return -ENOENT;
+
+ vp_modern_set_queue_reset(mdev, vq->index);
+
+ info = vp_dev->vqs[vq->index];
+
+ /* delete vq from irq handler */
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ INIT_LIST_HEAD(&info->node);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_break(vq);
+#endif
+
+ /* For the case where vq has an exclusive irq, call synchronize_irq() to
+ * wait for completion.
+ *
+ * note: We can't use disable_irq() since it conflicts with the affinity
+ * managed IRQ that is used by some drivers.
+ */
+ if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+ vq->reset = true;
+
+ return 0;
+}
+
+static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags, index;
+ int err;
+
+ if (!vq->reset)
+ return -EBUSY;
+
+ index = vq->index;
+ info = vp_dev->vqs[index];
+
+ if (vp_modern_get_queue_reset(mdev, index))
+ return -EBUSY;
+
+ if (vp_modern_get_queue_enable(mdev, index))
+ return -EBUSY;
+
+ err = vp_active_vq(vq, info->msix_vector);
+ if (err)
+ return err;
+
+ if (vq->callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_unbreak(vq);
+#endif
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+ vq->reset = false;
+
+ return 0;
+}
+
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
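Taken together, the two new callbacks implement the VIRTIO_F_RING_RESET handshake for the modern PCI transport: vp_modern_disable_vq_and_reset() quiesces a live queue (device-side reset, unlink from the per-device vq list, synchronize_irq() so no handler is mid-flight, then vq->reset = true), after which the driver owns the ring; vp_modern_enable_vq_after_reset() re-programs size, addresses and MSI-X vector through vp_active_vq(), relinks the vq and flips queue_enable. Drivers normally reach the pair through core helpers such as virtqueue_resize() rather than the config ops; a hedged sketch of the sequence:

	/* Sketch only: in-tree drivers go through the virtio core
	 * rather than calling the config ops directly.
	 */
	err = vdev->config->disable_vq_and_reset(vq);
	if (err)
		return err;

	/* queue quiesced: reclaim buffers, change the ring size, ... */

	err = vdev->config->enable_vq_after_reset(vq);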
@@ -218,32 +325,21 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
- /* activate the queue */
- vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
- vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
- virtqueue_get_avail_addr(vq),
- virtqueue_get_used_addr(vq));
+ vq->num_max = num;
+
+ err = vp_active_vq(vq, msix_vec);
+ if (err)
+ goto err;
vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
- goto err_map_notify;
- }
-
- if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto err_assign_vector;
- }
+ goto err;
}
return vq;
-err_assign_vector:
- if (!mdev->notify_base)
- pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
-err_map_notify:
+err:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -401,6 +497,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -419,6 +517,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
/* the PCI probing function */
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index fa2a9445bb18..869cb46bef96 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -3,6 +3,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
/*
* vp_modern_map_capability - map a part of virtio pci capability
@@ -475,6 +476,44 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
EXPORT_SYMBOL_GPL(vp_modern_set_status);
/*
+ * vp_modern_get_queue_reset - get the queue reset status
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ return vp_ioread16(&cfg->queue_reset);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
+
+/*
+ * vp_modern_set_queue_reset - reset the queue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ vp_iowrite16(1, &cfg->queue_reset);
+
+ while (vp_ioread16(&cfg->queue_reset))
+ msleep(1);
+
+ while (vp_ioread16(&cfg->cfg.queue_enable))
+ msleep(1);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
+
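The wait loops above spin with no upper bound; the virtio spec obliges a conforming device to complete the reset, so termination is the device's responsibility. A defensive variant with a bounded wait might look like the following sketch (the ~100 ms budget and the function name are assumptions, not part of this patch):

/* Sketch (assumption): bounded-wait variant of the reset handshake. */
static int vp_modern_set_queue_reset_timeout(struct virtio_pci_modern_device *mdev,
					     u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg =
		(struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
	int budget = 100;	/* assumed: ~100 x 1 ms */

	vp_iowrite16(index, &cfg->cfg.queue_select);
	vp_iowrite16(1, &cfg->queue_reset);

	/* Wait until the device acknowledges the reset and the queue reads
	 * back as disabled, or give up once the budget expires. */
	while (vp_ioread16(&cfg->queue_reset) ||
	       vp_ioread16(&cfg->cfg.queue_enable)) {
		if (--budget < 0)
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}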
+/*
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 643ca779fcc6..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -85,6 +85,71 @@ struct vring_desc_extra {
u16 next; /* The next desc state in a list. */
};
+struct vring_virtqueue_split {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+
+ /*
+ * The vring creation parameters are kept so a new vring
+ * can be created later (e.g. when resizing).
+ */
+ u32 vring_align;
+ bool may_reduce_num;
+};
+
+struct vring_virtqueue_packed {
+ /* Actual memory layout for this queue. */
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -124,64 +189,10 @@ struct vring_virtqueue {
union {
/* Available for split ring */
- struct {
- /* Actual memory layout for this queue. */
- struct vring vring;
-
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
-
- /*
- * Last written value to avail->idx in
- * guest byte order.
- */
- u16 avail_idx_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_split *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t queue_dma_addr;
- size_t queue_size_in_bytes;
- } split;
+ struct vring_virtqueue_split split;
/* Available for packed ring */
- struct {
- /* Actual memory layout for this queue. */
- struct {
- unsigned int num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Avail used flags. */
- u16 avail_used_flags;
-
- /* Index of the next avail descriptor. */
- u16 next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- u16 event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
+ struct vring_virtqueue_packed packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
@@ -200,6 +211,16 @@ struct vring_virtqueue {
#endif
};
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
+static void vring_free(struct virtqueue *_vq);
/*
* Helpers.
@@ -364,6 +385,24 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
return dma_mapping_error(vring_dma_dev(vq), addr);
}
+static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
+{
+ vq->vq.num_free = num;
+
+ if (vq->packed_ring)
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ else
+ vq->last_used_idx = 0;
+
+ vq->event_triggered = false;
+ vq->num_added = 0;
+
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+}
+
/*
* Split ring specific functions - *_split().
@@ -907,28 +946,107 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
return NULL;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
+ struct vring_virtqueue *vq)
+{
+ struct virtio_device *vdev;
+
+ vdev = vq->vq.vdev;
+
+ vring_split->avail_flags_shadow = 0;
+ vring_split->avail_idx_shadow = 0;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!vq->vq.callback) {
+ vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
+ vring_split->avail_flags_shadow);
+ }
+}
+
+static void virtqueue_reinit_split(struct vring_virtqueue *vq)
+{
+ int num;
+
+ num = vq->split.vring.num;
+
+ vq->split.vring.avail->flags = 0;
+ vq->split.vring.avail->idx = 0;
+
+ /* reset avail event */
+ vq->split.vring.avail->ring[num] = 0;
+
+ vq->split.vring.used->flags = 0;
+ vq->split.vring.used->idx = 0;
+
+ /* reset used event */
+ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
+
+ virtqueue_init(vq, num);
+
+ virtqueue_vring_init_split(&vq->split, vq);
+}
+
+static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
+ struct vring_virtqueue_split *vring_split)
+{
+ vq->split = *vring_split;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
+{
+ struct vring_desc_state_split *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_split->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!state)
+ goto err_state;
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_extra;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_split));
+
+ vring_split->desc_state = state;
+ vring_split->desc_extra = extra;
+ return 0;
+
+err_extra:
+ kfree(state);
+err_state:
+ return -ENOMEM;
+}
+
+static void vring_free_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev)
+{
+ vring_free_queue(vdev, vring_split->queue_size_in_bytes,
+ vring_split->vring.desc,
+ vring_split->queue_dma_addr);
+
+ kfree(vring_split->desc_state);
+ kfree(vring_split->desc_extra);
+}
+
+static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ u32 num,
+ unsigned int vring_align,
+ bool may_reduce_num)
{
- struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
+ return -EINVAL;
}
/* TODO: allocate each queue chunk individually */
@@ -939,11 +1057,11 @@ static struct virtqueue *vring_create_virtqueue_split(
if (queue)
break;
if (!may_reduce_num)
- return NULL;
+ return -ENOMEM;
}
if (!num)
- return NULL;
+ return -ENOMEM;
if (!queue) {
/* Try to get a single page. You are my only hope! */
@@ -951,26 +1069,85 @@ static struct virtqueue *vring_create_virtqueue_split(
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
- return NULL;
+ return -ENOMEM;
+
+ vring_init(&vring_split->vring, num, queue, vring_align);
+
+ vring_split->queue_dma_addr = dma_addr;
+ vring_split->queue_size_in_bytes = vring_size(num, vring_align);
+
+ vring_split->vring_align = vring_align;
+ vring_split->may_reduce_num = may_reduce_num;
+
+ return 0;
+}
+
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct virtqueue *vq;
+ int err;
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
+ may_reduce_num);
+ if (err)
+ return NULL;
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
+ vring_free_split(&vring_split, vdev);
return NULL;
}
- to_vvq(vq)->split.queue_dma_addr = dma_addr;
- to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
+static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num,
+ vq->split.vring_align,
+ vq->split.may_reduce_num);
+ if (err)
+ goto err;
+
+ err = vring_alloc_state_extra_split(&vring_split);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_split(&vring_split, vq);
+
+ virtqueue_init(vq, vring_split.vring.num);
+ virtqueue_vring_attach_split(vq, &vring_split);
+
+ return 0;
+
+err_state_extra:
+ vring_free_split(&vring_split, vdev);
+err:
+ virtqueue_reinit_split(vq);
+ return -ENOMEM;
+}
+
/*
* Packed ring specific functions - *_packed().
@@ -1637,8 +1814,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
- unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
struct vring_desc_extra *desc_extra;
unsigned int i;
@@ -1656,19 +1832,32 @@ static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *v
return desc_extra;
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev)
+{
+ if (vring_packed->vring.desc)
+ vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
+ vring_packed->vring.desc,
+ vring_packed->ring_dma_addr);
+
+ if (vring_packed->vring.driver)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.driver,
+ vring_packed->driver_event_dma_addr);
+
+ if (vring_packed->vring.device)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.device,
+ vring_packed->device_event_dma_addr);
+
+ kfree(vring_packed->desc_state);
+ kfree(vring_packed->desc_extra);
+}
+
+static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ u32 num)
{
- struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
@@ -1680,7 +1869,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
- goto err_ring;
+ goto err;
+
+ vring_packed->vring.desc = ring;
+ vring_packed->ring_dma_addr = ring_dma_addr;
+ vring_packed->ring_size_in_bytes = ring_size_in_bytes;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
@@ -1688,13 +1881,112 @@ static struct virtqueue *vring_create_virtqueue_packed(
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
- goto err_driver;
+ goto err;
+
+ vring_packed->vring.driver = driver;
+ vring_packed->event_size_in_bytes = event_size_in_bytes;
+ vring_packed->driver_event_dma_addr = driver_event_dma_addr;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
- goto err_device;
+ goto err;
+
+ vring_packed->vring.device = device;
+ vring_packed->device_event_dma_addr = device_event_dma_addr;
+
+ vring_packed->vring.num = num;
+
+ return 0;
+
+err:
+ vring_free_packed(vring_packed, vdev);
+ return -ENOMEM;
+}
+
+static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
+{
+ struct vring_desc_state_packed *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_packed->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+ if (!state)
+ goto err_desc_state;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_packed));
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_desc_extra;
+
+ vring_packed->desc_state = state;
+ vring_packed->desc_extra = extra;
+
+ return 0;
+
+err_desc_extra:
+ kfree(state);
+err_desc_state:
+ return -ENOMEM;
+}
+
+static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
+ bool callback)
+{
+ vring_packed->next_avail_idx = 0;
+ vring_packed->avail_wrap_counter = 1;
+ vring_packed->event_flags_shadow = 0;
+ vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vring_packed->vring.driver->flags =
+ cpu_to_le16(vring_packed->event_flags_shadow);
+ }
+}
+
+static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
+ struct vring_virtqueue_packed *vring_packed)
+{
+ vq->packed = *vring_packed;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
+{
+ memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
+ memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
+
+ /* We need to reset desc.flags; for details, see is_used_desc_packed(). */
+ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
+
+ virtqueue_init(vq, vq->packed.vring.num);
+ virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
+}
+
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
@@ -1703,8 +1995,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -1713,15 +2005,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- vq->event_triggered = false;
- vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1730,65 +2015,58 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
+ virtqueue_vring_init_packed(&vring_packed, !!callback);
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num,
- sizeof(struct vring_desc_state_packed),
- GFP_KERNEL);
- if (!vq->packed.desc_state)
- goto err_desc_state;
-
- memset(vq->packed.desc_state, 0,
- num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
-
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (!vq->packed.desc_extra)
- goto err_desc_extra;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags =
- cpu_to_le16(vq->packed.event_flags_shadow);
- }
+ virtqueue_init(vq, num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_desc_extra:
- kfree(vq->packed.desc_state);
-err_desc_state:
+err_state_extra:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+ vring_free_packed(&vring_packed, vdev);
err_ring:
return NULL;
}
+static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
+
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
+
+ virtqueue_init(vq, vring_packed.vring.num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
+
+ return 0;
+
+err_state_extra:
+ vring_free_packed(&vring_packed, vdev);
+err_ring:
+ virtqueue_reinit_packed(vq);
+ return -ENOMEM;
+}
+
/*
* Generic functions and exported symbols.
@@ -2131,8 +2409,8 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or when resetting a queue.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
@@ -2148,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2180,16 +2466,17 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
+ int err;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
@@ -2202,8 +2489,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -2212,14 +2499,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2228,47 +2508,22 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->split.queue_dma_addr = 0;
- vq->split.queue_size_in_bytes = 0;
-
- vq->split.vring = vring;
- vq->split.avail_flags_shadow = 0;
- vq->split.avail_idx_shadow = 0;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
- vq->split.avail_flags_shadow);
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
}
- vq->split.desc_state = kmalloc_array(vring.num,
- sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state)
- goto err_state;
-
- vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
- if (!vq->split.desc_extra)
- goto err_extra;
+ virtqueue_vring_init_split(vring_split, vq);
- /* Put everything in free lists. */
- vq->free_head = 0;
- memset(vq->split.desc_state, 0, vring.num *
- sizeof(struct vring_desc_state_split));
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-
-err_extra:
- kfree(vq->split.desc_state);
-err_state:
- kfree(vq);
- return NULL;
}
-EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
@@ -2294,6 +2549,75 @@ struct virtqueue *vring_create_virtqueue(
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @num: new number of ring descriptors
+ * @recycle: callback to recycle buffers that are no longer used
+ *
+ * When a new vring really must be created, this function first puts the
+ * current vq into the reset state, then calls the passed callback to recycle
+ * each buffer that is no longer used. The old vring is released only after
+ * the new vring has been successfully created.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring; falls back to the original ring
+ * size, so the vq can still work normally.
+ * -EBUSY: Failed to sync with the device; the vq may not work properly.
+ * -ENOENT: Transport or device not supported.
+ * -E2BIG/-EINVAL: num is invalid (larger than num_max, or zero).
+ * -EPERM: Operation not permitted
+ *
+ */
+int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (num > vq->vq.num_max)
+ return -E2BIG;
+
+ if (!num)
+ return -EINVAL;
+
+ if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ return 0;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ if (vq->packed_ring)
+ err = virtqueue_resize_packed(_vq, num);
+ else
+ err = virtqueue_resize_split(_vq, num);
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
+
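A driver-side usage sketch for virtqueue_resize(): shrinking a ring while recycling the buffers handed back by the core (example_recycle() and example_shrink_ring() are illustrative names, and the teardown is a stand-in for whatever unmap/free the real driver needs):

/* Sketch (assumption): driver-side use of virtqueue_resize(). */
static void example_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);	/* stand-in for dma unmap + free */
}

static int example_shrink_ring(struct virtqueue *vq, u32 new_num)
{
	int err;

	err = virtqueue_resize(vq, new_num, example_recycle);
	if (err == -ENOMEM)
		/* Allocation failed; the old ring is still usable. */
		return 0;
	return err;
}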
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2306,25 +2630,21 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
- struct vring vring;
+ struct vring_virtqueue_split vring_split = {};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vring_init(&vring_split.vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
-void vring_del_virtqueue(struct virtqueue *_vq)
+static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- spin_lock(&vq->vq.vdev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&vq->vq.vdev->vqs_list_lock);
-
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2355,6 +2675,18 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
+}
+
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
+ vring_free(_vq);
+
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2402,6 +2734,30 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_break(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_break);
+
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_unbreak(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
+
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c40f7deb6b5a..9670cc79371d 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -183,6 +183,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ vq->num_max = max_num;
+
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0796f6a9e8ff..9295492d24f7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1963,6 +1963,14 @@ config MEN_A21_WDT
# PPC64 Architecture
+config PSERIES_WDT
+ tristate "POWER Architecture Platform Watchdog Timer"
+ depends on PPC_PSERIES
+ select WATCHDOG_CORE
+ help
+ Driver for virtual watchdog timers provided by PAPR
+ hypervisors (e.g. PowerVM, KVM).
+
config WATCHDOG_RTAS
tristate "RTAS watchdog"
depends on PPC_RTAS
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c324e9d820e9..cdeb119e6e61 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -187,6 +187,7 @@ obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
# PPC64 Architecture
+obj-$(CONFIG_PSERIES_WDT) += pseries-wdt.o
obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
# S390 Architecture
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 1635f421ef2c..854b1cc723cb 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -274,6 +274,8 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 1ffcf6aca6ae..9388838899ac 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -192,7 +192,6 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm7038_wdt_suspend(struct device *dev)
{
struct bcm7038_watchdog *wdt = dev_get_drvdata(dev);
@@ -212,10 +211,9 @@ static int bcm7038_wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
- bcm7038_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops,
+ bcm7038_wdt_suspend, bcm7038_wdt_resume);
static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm6345-wdt" },
@@ -236,7 +234,7 @@ static struct platform_driver bcm7038_wdt_driver = {
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
- .pm = &bcm7038_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&bcm7038_wdt_pm_ops),
}
};
module_platform_driver(bcm7038_wdt_driver);
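This is the first of several watchdog drivers in this series converted to the same pattern: drop the #ifdef CONFIG_PM_SLEEP guards, define the ops with DEFINE_SIMPLE_DEV_PM_OPS(), and reference them through pm_sleep_ptr(), which evaluates to NULL when sleep support is disabled so the compiler can discard the now-unreferenced callbacks. A generic sketch of the shape (the foo names are placeholders):

/* Sketch: the generic shape of the CONFIG_PM_SLEEP conversion. */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),	/* NULL if !CONFIG_PM_SLEEP */
	},
};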
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 5e4dc1a0f2c6..75da5cd02615 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -74,7 +74,7 @@ static unsigned long long period_to_sec(unsigned int period)
/*
* This procedure will find the highest period which will give a timeout
* greater than the one required. e.g. for a bus speed of 66666666 and
- * and a parameter of 2 secs, then this procedure will return a value of 38.
+ * a parameter of 2 secs, then this procedure will return a value of 38.
*/
static unsigned int sec_to_period(unsigned int secs)
{
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index cd578843277e..52962e8d11a6 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -218,7 +218,7 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
/*
* Set the new value in the watchdog. Some versions of dw_wdt
- * have have TOPINIT in the TIMEOUT_RANGE register (as per
+ * have TOPINIT in the TIMEOUT_RANGE register (as per
* CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
* effectively get a pat of the watchdog right here.
*/
@@ -375,7 +375,6 @@ static irqreturn_t dw_wdt_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
@@ -410,9 +409,8 @@ static int dw_wdt_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
/*
* In case if DW WDT IP core is synthesized with fixed TOP feature disabled the
@@ -710,7 +708,7 @@ static struct platform_driver dw_wdt_driver = {
.driver = {
.name = "dw_wdt",
.of_match_table = of_match_ptr(dw_wdt_of_match),
- .pm = &dw_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&dw_wdt_pm_ops),
},
};
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 7f59c680de25..6a16d3d0bb1e 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -634,7 +634,9 @@ static int __init fintek_wdt_init(void)
pdata.type = ret;
- platform_driver_register(&fintek_wdt_driver);
+ ret = platform_driver_register(&fintek_wdt_driver);
+ if (ret)
+ return ret;
wdt_res.name = "superio port";
wdt_res.flags = IORESOURCE_IO;
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index b76ad6ba0915..33835c0b06de 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -6,7 +6,7 @@
* Copyright (C) 2022 Luca Ceresoli
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/err.h>
@@ -260,5 +260,5 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index f0d4e3cc7459..e97787536792 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -401,7 +401,6 @@ static int mtk_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mtk_wdt_suspend(struct device *dev)
{
struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
@@ -423,7 +422,6 @@ static int mtk_wdt_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
@@ -437,16 +435,14 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
-static const struct dev_pm_ops mtk_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
- mtk_wdt_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(mtk_wdt_pm_ops,
+ mtk_wdt_suspend, mtk_wdt_resume);
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
.driver = {
.name = DRV_NAME,
- .pm = &mtk_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_wdt_pm_ops),
.of_match_table = mtk_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 9f9a340427fc..c7f745caf203 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -442,7 +442,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
}
}
-/* -- Notifier funtions -----------------------------------------*/
+/* -- Notifier functions -----------------------------------------*/
/**
* pc87413_notify_sys:
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 0937b8d33104..f4bfbffaf49c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -9,6 +9,12 @@
#include <linux/regmap.h>
#include <linux/watchdog.h>
+#define PON_POFF_REASON1 0x0c
+#define PON_POFF_REASON1_PMIC_WD BIT(2)
+#define PON_POFF_REASON2 0x0d
+#define PON_POFF_REASON2_UVLO BIT(5)
+#define PON_POFF_REASON2_OTST3 BIT(6)
+
#define PON_INT_RT_STS 0x10
#define PMIC_WD_BARK_STS_BIT BIT(6)
@@ -58,9 +64,8 @@ static int pm8916_wdt_ping(struct watchdog_device *wdev)
{
struct pm8916_wdt *wdt = watchdog_get_drvdata(wdev);
- return regmap_update_bits(wdt->regmap,
- wdt->baseaddr + PON_PMIC_WD_RESET_PET,
- WATCHDOG_PET_BIT, WATCHDOG_PET_BIT);
+ return regmap_write(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_PET,
+ WATCHDOG_PET_BIT);
}
static int pm8916_wdt_configure_timers(struct watchdog_device *wdev)
@@ -111,12 +116,14 @@ static irqreturn_t pm8916_wdt_isr(int irq, void *arg)
}
static const struct watchdog_info pm8916_wdt_ident = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER,
.identity = "QCOM PM8916 PON WDT",
};
static const struct watchdog_info pm8916_wdt_pt_ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER |
WDIOF_PRETIMEOUT,
.identity = "QCOM PM8916 PON WDT",
};
@@ -135,7 +142,9 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pm8916_wdt *wdt;
struct device *parent;
+ unsigned int val;
int err, irq;
+ u8 poff[2];
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -176,6 +185,30 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->wdev.info = &pm8916_wdt_ident;
}
+ err = regmap_bulk_read(wdt->regmap, wdt->baseaddr + PON_POFF_REASON1,
+ &poff, ARRAY_SIZE(poff));
+ if (err) {
+ dev_err(dev, "failed to read POFF reason: %d\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "POFF reason: %#x %#x\n", poff[0], poff[1]);
+ if (poff[0] & PON_POFF_REASON1_PMIC_WD)
+ wdt->wdev.bootstatus |= WDIOF_CARDRESET;
+ if (poff[1] & PON_POFF_REASON2_UVLO)
+ wdt->wdev.bootstatus |= WDIOF_POWERUNDER;
+ if (poff[1] & PON_POFF_REASON2_OTST3)
+ wdt->wdev.bootstatus |= WDIOF_OVERHEAT;
+
+ err = regmap_read(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL2,
+ &val);
+ if (err) {
+ dev_err(dev, "failed to check if watchdog is active: %d\n", err);
+ return err;
+ }
+ if (val & S2_RESET_EN_BIT)
+ set_bit(WDOG_HW_RUNNING, &wdt->wdev.status);
+
/* Configure watchdog to hard-reset mode */
err = regmap_write(wdt->regmap,
wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL,
diff --git a/drivers/watchdog/pseries-wdt.c b/drivers/watchdog/pseries-wdt.c
new file mode 100644
index 000000000000..7f53b5293409
--- /dev/null
+++ b/drivers/watchdog/pseries-wdt.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 International Business Machines, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME "pseries-wdt"
+
+/*
+ * H_WATCHDOG Input
+ *
+ * R4: "flags":
+ *
+ * Bits 48-55: "operation"
+ */
+#define PSERIES_WDTF_OP_START 0x100UL /* start timer */
+#define PSERIES_WDTF_OP_STOP 0x200UL /* stop timer */
+#define PSERIES_WDTF_OP_QUERY 0x300UL /* query timer capabilities */
+
+/*
+ * Bits 56-63: "timeoutAction" (for "Start Watchdog" only)
+ */
+#define PSERIES_WDTF_ACTION_HARD_POWEROFF 0x1UL /* poweroff */
+#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2UL /* restart */
+#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3UL /* dump + restart */
+
+/*
+ * H_WATCHDOG Output
+ *
+ * R3: Return code
+ *
+ * H_SUCCESS The operation completed.
+ *
+ * H_BUSY The hypervisor is too busy; retry the operation.
+ *
+ * H_PARAMETER The given "flags" are somehow invalid. Either the
+ * "operation" or "timeoutAction" is invalid, or a
+ * reserved bit is set.
+ *
+ * H_P2 The given "watchdogNumber" is zero or exceeds the
+ * supported maximum value.
+ *
+ * H_P3 The given "timeoutInMs" is below the supported
+ * minimum value.
+ *
+ * H_NOOP The given "watchdogNumber" is already stopped.
+ *
+ * H_HARDWARE The operation failed for ineffable reasons.
+ *
+ * H_FUNCTION The H_WATCHDOG hypercall is not supported by this
+ * hypervisor.
+ *
+ * R4:
+ *
+ * - For the "Query Watchdog Capabilities" operation, a 64-bit
+ * structure:
+ */
+#define PSERIES_WDTQ_MIN_TIMEOUT(cap) (((cap) >> 48) & 0xffff)
+#define PSERIES_WDTQ_MAX_NUMBER(cap) (((cap) >> 32) & 0xffff)
+
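A worked decode of the capability word, using an assumed example value (for illustration only, not taken from the PAPR spec):

/*
 * Example (assumed value):
 *
 *   cap = 0x03e8000400000000
 *
 *   PSERIES_WDTQ_MIN_TIMEOUT(cap) = (cap >> 48) & 0xffff = 0x03e8 = 1000 ms
 *   PSERIES_WDTQ_MAX_NUMBER(cap)  = (cap >> 32) & 0xffff = 0x0004 = 4 timers
 */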
+static const unsigned long pseries_wdt_action[] = {
+ [0] = PSERIES_WDTF_ACTION_HARD_POWEROFF,
+ [1] = PSERIES_WDTF_ACTION_HARD_RESTART,
+ [2] = PSERIES_WDTF_ACTION_DUMP_RESTART,
+};
+
+#define WATCHDOG_ACTION 1
+static unsigned int action = WATCHDOG_ACTION;
+module_param(action, uint, 0444);
+MODULE_PARM_DESC(action, "Action taken when watchdog expires (default="
+ __MODULE_STRING(WATCHDOG_ACTION) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WATCHDOG_TIMEOUT 60
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, 0444);
+MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+struct pseries_wdt {
+ struct watchdog_device wd;
+ unsigned long action;
+ unsigned long num; /* Watchdog numbers are 1-based */
+};
+
+static int pseries_wdt_start(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ unsigned long flags, msecs;
+ long rc;
+
+ flags = pw->action | PSERIES_WDTF_OP_START;
+ msecs = wdd->timeout * MSEC_PER_SEC;
+ rc = plpar_hcall_norets(H_WATCHDOG, flags, pw->num, msecs);
+ if (rc != H_SUCCESS) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to start timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pseries_wdt_stop(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ long rc;
+
+ rc = plpar_hcall_norets(H_WATCHDOG, PSERIES_WDTF_OP_STOP, pw->num);
+ if (rc != H_SUCCESS && rc != H_NOOP) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to stop timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct watchdog_info pseries_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT
+ | WDIOF_PRETIMEOUT,
+};
+
+static const struct watchdog_ops pseries_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = pseries_wdt_start,
+ .stop = pseries_wdt_stop,
+};
+
+static int pseries_wdt_probe(struct platform_device *pdev)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct pseries_wdt *pw;
+ unsigned long cap;
+ long msecs, rc;
+ int err;
+
+ rc = plpar_hcall(H_WATCHDOG, ret, PSERIES_WDTF_OP_QUERY);
+ if (rc == H_FUNCTION)
+ return -ENODEV;
+ if (rc != H_SUCCESS)
+ return -EIO;
+ cap = ret[0];
+
+ pw = devm_kzalloc(&pdev->dev, sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
+
+ /*
+ * Assume watchdogNumber 1 for now. If we ever support
+ * multiple timers we will need to devise a way to choose a
+ * distinct watchdogNumber for each platform device at device
+ * registration time.
+ */
+ pw->num = 1;
+ if (PSERIES_WDTQ_MAX_NUMBER(cap) < pw->num)
+ return -ENODEV;
+
+ if (action >= ARRAY_SIZE(pseries_wdt_action))
+ return -EINVAL;
+ pw->action = pseries_wdt_action[action];
+
+ pw->wd.parent = &pdev->dev;
+ pw->wd.info = &pseries_wdt_info;
+ pw->wd.ops = &pseries_wdt_ops;
+ msecs = PSERIES_WDTQ_MIN_TIMEOUT(cap);
+ pw->wd.min_timeout = DIV_ROUND_UP(msecs, MSEC_PER_SEC);
+ pw->wd.max_timeout = UINT_MAX / 1000; /* from linux/watchdog.h */
+ pw->wd.timeout = timeout;
+ if (watchdog_init_timeout(&pw->wd, 0, NULL))
+ return -EINVAL;
+ watchdog_set_nowayout(&pw->wd, nowayout);
+ watchdog_stop_on_reboot(&pw->wd);
+ watchdog_stop_on_unregister(&pw->wd);
+ watchdog_set_drvdata(&pw->wd, pw);
+
+ err = devm_watchdog_register_device(&pdev->dev, &pw->wd);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, &pw->wd);
+
+ return 0;
+}
+
+static int pseries_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_stop(wd);
+ return 0;
+}
+
+static int pseries_wdt_resume(struct platform_device *pdev)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_start(wd);
+ return 0;
+}
+
+static const struct platform_device_id pseries_wdt_id[] = {
+ { .name = "pseries-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, pseries_wdt_id);
+
+static struct platform_driver pseries_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = pseries_wdt_id,
+ .probe = pseries_wdt_probe,
+ .resume = pseries_wdt_resume,
+ .suspend = pseries_wdt_suspend,
+};
+module_platform_driver(pseries_wdt_driver);
+
+MODULE_AUTHOR("Alexey Kardashevskiy");
+MODULE_AUTHOR("Scott Cheloha");
+MODULE_DESCRIPTION("POWER Architecture Platform Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
index 60058a0c3ec4..2a5298c5e8e4 100644
--- a/drivers/watchdog/realtek_otto_wdt.c
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -366,6 +366,7 @@ static const struct of_device_id otto_wdt_ids[] = {
{ .compatible = "realtek,rtl8380-wdt" },
{ .compatible = "realtek,rtl8390-wdt" },
{ .compatible = "realtek,rtl9300-wdt" },
+ { .compatible = "realtek,rtl9310-wdt" },
{ }
};
MODULE_DEVICE_TABLE(of, otto_wdt_ids);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 6db22f2e3a4f..95919392927f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -845,8 +845,6 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
s3c2410wdt_stop(&wdt->wdt_device);
}
-#ifdef CONFIG_PM_SLEEP
-
static int s3c2410wdt_suspend(struct device *dev)
{
int ret;
@@ -885,10 +883,9 @@ static int s3c2410wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
- s3c2410wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops,
+ s3c2410wdt_suspend, s3c2410wdt_resume);
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
@@ -897,7 +894,7 @@ static struct platform_driver s3c2410wdt_driver = {
.id_table = s3c2410_wdt_ids,
.driver = {
.name = "s3c2410-wdt",
- .pm = &s3c2410wdt_pm_ops,
+ .pm = pm_sleep_ptr(&s3c2410wdt_pm_ops),
.of_match_table = of_match_ptr(s3c2410_wdt_match),
},
};
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index ec20ad4e534f..aeee934ca51b 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -339,7 +339,6 @@ static const struct of_device_id sama5d4_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
-#ifdef CONFIG_PM_SLEEP
static int sama5d4_wdt_suspend_late(struct device *dev)
{
struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
@@ -366,18 +365,17 @@ static int sama5d4_wdt_resume_early(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sama5d4_wdt_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
- sama5d4_wdt_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
+ sama5d4_wdt_resume_early)
};
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
.driver = {
.name = "sama5d4_wdt",
- .pm = &sama5d4_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&sama5d4_wdt_pm_ops),
.of_match_table = sama5d4_wdt_of_match,
}
};
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 86ffb58fbc85..ae54dd33e233 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -402,6 +402,7 @@ out:
iounmap(addr);
release_resource(res);
+ kfree(res);
return ret;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index f9479a3fe2a6..78ba36689eec 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/char/watchdog/sp805-wdt.c
*
@@ -341,6 +342,10 @@ static const struct amba_id sp805_wdt_ids[] = {
.id = 0x00141805,
.mask = 0x00ffffff,
},
+ {
+ .id = 0x001bb824,
+ .mask = 0x00ffffff,
+ },
{ 0, 0 },
};
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 14ab6559c748..39abecdb9dd1 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -248,7 +248,6 @@ static int st_wdog_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_wdog_suspend(struct device *dev)
{
struct st_wdog *st_wdog = watchdog_get_drvdata(&st_wdog_dev);
@@ -285,16 +284,14 @@ static int st_wdog_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
- st_wdog_suspend,
- st_wdog_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
+ st_wdog_suspend, st_wdog_resume);
static struct platform_driver st_wdog_driver = {
.driver = {
.name = "st-lpc-wdt",
- .pm = &st_wdog_pm_ops,
+ .pm = pm_sleep_ptr(&st_wdog_pm_ops),
.of_match_table = st_wdog_match,
},
.probe = st_wdog_probe,
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index dfe06e506cad..d5de6c0657a5 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -230,8 +230,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_wdt_runtime_suspend(struct device *dev)
+static int tegra_wdt_suspend(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -241,7 +240,7 @@ static int tegra_wdt_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_wdt_runtime_resume(struct device *dev)
+static int tegra_wdt_resume(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -250,7 +249,6 @@ static int tegra_wdt_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id tegra_wdt_of_match[] = {
{ .compatible = "nvidia,tegra30-timer", },
@@ -258,16 +256,14 @@ static const struct of_device_id tegra_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);
-static const struct dev_pm_ops tegra_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
- tegra_wdt_runtime_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tegra_wdt_pm_ops,
+ tegra_wdt_suspend, tegra_wdt_resume);
static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
.driver = {
.name = "tegra-wdt",
- .pm = &tegra_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&tegra_wdt_pm_ops),
.of_match_table = tegra_wdt_of_match,
},
};
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e6f95e99156d..aeadaa07c891 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -467,7 +467,6 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdat->wdd);
}
-#ifdef CONFIG_PM_SLEEP
static int wdat_wdt_suspend_noirq(struct device *dev)
{
struct wdat_wdt *wdat = dev_get_drvdata(dev);
@@ -528,18 +527,16 @@ static int wdat_wdt_resume_noirq(struct device *dev)
return wdat_wdt_start(&wdat->wdd);
}
-#endif
static const struct dev_pm_ops wdat_wdt_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq,
- wdat_wdt_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq, wdat_wdt_resume_noirq)
};
static struct platform_driver wdat_wdt_driver = {
.probe = wdat_wdt_probe,
.driver = {
.name = "wdat_wdt",
- .pm = &wdat_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&wdat_wdt_pm_ops),
},
};
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 5e8321f43cbd..c443f04aaad7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -45,6 +45,7 @@
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
+#include <asm/xen/cpuid.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
@@ -2184,6 +2185,7 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
+#ifdef CONFIG_X86
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
@@ -2196,11 +2198,48 @@ void xen_setup_callback_vector(void)
callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
if (xen_set_callback_via(callback_via)) {
pr_err("Request for Xen HVM callback vector failed\n");
- xen_have_vector_callback = 0;
+ xen_have_vector_callback = false;
}
}
}
+/*
+ * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
+ * fall back to the global vector-type callback.
+ */
+static __init void xen_init_setup_upcall_vector(void)
+{
+ if (!xen_have_vector_callback)
+ return;
+
+ if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
+ !xen_set_upcall_vector(0))
+ xen_percpu_upcall = true;
+ else if (xen_feature(XENFEAT_hvm_callback_vector))
+ xen_setup_callback_vector();
+ else
+ xen_have_vector_callback = false;
+}
+
+int xen_set_upcall_vector(unsigned int cpu)
+{
+ int rc;
+ xen_hvm_evtchn_upcall_vector_t op = {
+ .vector = HYPERVISOR_CALLBACK_VECTOR,
+ .vcpu = per_cpu(xen_vcpu_id, cpu),
+ };
+
+ rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
+ if (rc)
+ return rc;
+
+ /* Trick the toolstack into thinking we are enlightened. */
+ if (!cpu)
+ rc = xen_set_callback_via(1);
+
+ return rc;
+}
+
static __init void xen_alloc_callback_vector(void)
{
if (!xen_have_vector_callback)
@@ -2211,8 +2250,11 @@ static __init void xen_alloc_callback_vector(void)
}
#else
void xen_setup_callback_vector(void) {}
+static inline void xen_init_setup_upcall_vector(void) {}
+int xen_set_upcall_vector(unsigned int cpu) { return 0; }
static inline void xen_alloc_callback_vector(void) {}
-#endif
+#endif /* CONFIG_XEN_PVHVM */
+#endif /* CONFIG_X86 */
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);
@@ -2272,10 +2314,9 @@ void __init xen_init_IRQ(void)
if (xen_initial_domain())
pci_xen_initial_domain();
}
- if (xen_feature(XENFEAT_hvm_callback_vector)) {
- xen_setup_callback_vector();
- xen_alloc_callback_vector();
- }
+ xen_init_setup_upcall_vector();
+ xen_alloc_callback_vector();
+
if (xen_hvm_domain()) {
native_init_IRQ();
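The HVMOP_set_evtchn_upcall_vector path registers the callback vector per vCPU rather than once globally, so each CPU that comes online needs the same hypercall. A hedged sketch of such a caller; the hotplug hook itself is hypothetical and not part of this hunk:

	static int foo_xen_cpu_online(unsigned int cpu)
	{
		/* re-arm the per-vCPU upcall selected by xen_init_setup_upcall_vector() */
		if (xen_percpu_upcall)
			return xen_set_upcall_vector(cpu);
		return 0;
	}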
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 738029de3c67..e1ec725c2819 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1047,6 +1047,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
size_t size;
int i, ret;
+ if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
+ return -ENOMEM;
+
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
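The new check closes an integer overflow: args->nr_pages is an int, so nr_pages << PAGE_SHIFT is evaluated in 32-bit signed arithmetic before being assigned to the size_t. With 4 KiB pages, INT_MAX >> PAGE_SHIFT is 524287; one page more and the shift wraps, making the allocation far smaller than callers assume. Annotated restatement of the guard (arithmetic assumes PAGE_SHIFT == 12):

	/* 524288 << 12 == 0x80000000, which exceeds INT_MAX, so the
	 * bound must be enforced before the shift is performed:
	 */
	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;
	size = args->nr_pages << PAGE_SHIFT;	/* now provably in range */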
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 3369734108af..e88e8f6f0a33 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -581,27 +581,30 @@ static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
- unsigned int i;
+ unsigned int i, off = 0;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < num; ) {
unsigned int requested;
int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
- PAGE_SIZE);
+ PAGE_SIZE) - off;
if (requested > nr_pages)
return -ENOSPC;
page_count = pin_user_pages_fast(
- (unsigned long) kbufs[i].uptr,
+ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
requested, FOLL_WRITE, pages);
- if (page_count < 0)
- return page_count;
+ if (page_count <= 0)
+ return page_count ? : -EFAULT;
*pinned += page_count;
nr_pages -= page_count;
pages += page_count;
+
+ off = (requested == page_count) ? 0 : off + page_count;
+ i += !off;
}
return 0;
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
}
rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
- if (rc < 0) {
- nr_pages = pinned;
+ if (rc < 0)
goto out;
- }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ unlock_pages(pages, pinned);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
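Two fixes land in privcmd here. First, pin_user_pages_fast() may legitimately pin fewer pages than requested; the old loop treated any positive return as complete, leaving later buffers referencing unpinned memory. The rework retries the same buffer from offset off until it is fully pinned, only then advancing i. Second, the error path now unpins exactly *pinned pages instead of the unrelated nr_pages remainder. A self-contained userspace model of the resume-from-offset loop, where pin() is a stand-in that, like pin_user_pages_fast(), may return a short count:

	#include <errno.h>
	#include <stdio.h>

	struct buf { unsigned int npages; };

	/* stand-in: never pins more than 2 pages per call */
	static int pin(struct buf *b, unsigned int off, unsigned int requested)
	{
		(void)b; (void)off;
		return requested > 2 ? 2 : (int)requested;
	}

	static int lock_all(struct buf *bufs, unsigned int num, unsigned int *pinned)
	{
		unsigned int i, off = 0;

		for (i = 0; i < num; ) {
			unsigned int requested = bufs[i].npages - off;
			int got = pin(&bufs[i], off, requested);

			if (got <= 0)
				return got ? got : -EFAULT;

			*pinned += got;
			/* stay on this buffer at the new offset until it completes */
			off = (requested == (unsigned int)got) ? 0 : off + got;
			i += !off;
		}
		return 0;
	}

	int main(void)
	{
		struct buf bufs[] = { { 5 }, { 1 } };
		unsigned int pinned = 0;

		if (!lock_all(bufs, 2, &pinned))
			printf("pinned %u pages\n", pinned);	/* pinned 6 pages */
		return 0;
	}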
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 3fbc21466a93..84e014490950 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -159,7 +159,7 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
return XEN_PCI_ERR_op_failed;
}
- /* The value the guest needs is actually the IDT vector, not the
+ /* The value the guest needs is actually the IDT vector, not
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7a0c93acc2c5..d3dcda344989 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1121,7 +1121,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
"%s: writing %s", __func__, state);
return;
}
- strlcpy(phy, val, VSCSI_NAMELEN);
+ strscpy(phy, val, VSCSI_NAMELEN);
kfree(val);
/* virtual SCSI device */
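strlcpy() returns the length of the whole source string and keeps reading it beyond the destination size, so truncation is silent and the source may be over-read. strscpy() bounds the read as well as the write and reports truncation through its return value. This conversion (and the matching one in xenbus_probe_frontend.c below) keeps the unchecked call style, but the result can be tested where truncation matters; illustrative only:

	ssize_t len = strscpy(phy, val, VSCSI_NAMELEN);

	if (len == -E2BIG)
		pr_warn("%s: physical device name truncated\n", __func__);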
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d5f3f763717e..d4b251925796 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
grant_ref_t gref_head;
unsigned int i;
+ void *addr;
int ret;
- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
if (!*vaddr) {
ret = -ENOMEM;
goto err;
@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long gfn;
if (is_vmalloc_addr(*vaddr))
- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
+ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
else
- gfn = virt_to_gfn(vaddr[i]);
+ gfn = virt_to_gfn(addr);
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);
+
+ addr += XEN_PAGE_SIZE;
}
return 0;
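The underlying bug: vaddr is the void ** out-parameter, so the old vaddr[i] walked an array of pointers in sizeof(void *) strides starting at the parameter itself, rather than walking the pages of the newly allocated ring. The fix keeps a byte cursor into the mapping. Reduced to its essentials:

	void *addr = *vaddr;		/* start of the ring allocation */

	for (i = 0; i < nr_pages; i++) {
		/* translate and grant the page at addr ... */
		addr += XEN_PAGE_SIZE;	/* advance one Xen page, not one pointer */
	}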
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 597af455a522..0792fda49a15 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
{
struct xenbus_file_priv *u = filp->private_data;
struct read_buffer *rb;
- unsigned i;
+ ssize_t i;
int ret;
mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@ again:
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
i = 0;
while (i < len) {
- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
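Besides satisfying min()'s strict type check, this removes a truncation: len is a size_t, and the old (unsigned)len cast chopped it to 32 bits before the subtraction. min_t(size_t, ...) casts both operands to one explicit type and compares at full width. The hazard in miniature (the oversized length is hypothetical):

	size_t len = 0x100000001UL;		/* > 4 GiB */
	unsigned int bad = (unsigned int)len;	/* truncates to 1 */
	size_t ok = min_t(size_t, len, 4096);	/* full-width compare: 4096 */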
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 5abded97e1a7..9c09f89d8278 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -305,7 +305,7 @@ static int __init xenbus_probe_backend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- if (register_shrinker(&backend_memory_shrinker))
+ if (register_shrinker(&backend_memory_shrinker, "xen-backend"))
pr_warn("shrinker registration failed\n");
return 0;
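This adapts to the shrinker-naming API: register_shrinker() now takes a printf-style name, which is surfaced through the shrinker debugfs interface so individual shrinkers can be distinguished. Usage sketch with a hypothetical driver shrinker (foo_count_objects, foo_scan_objects and instance_id are made up):

	static struct shrinker foo_shrinker = {
		.count_objects	= foo_count_objects,
		.scan_objects	= foo_scan_objects,
		.seeks		= DEFAULT_SEEKS,
	};

	ret = register_shrinker(&foo_shrinker, "foo-%d", instance_id);
	if (ret)
		pr_warn("foo shrinker registration failed\n");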
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 07b010a68fcf..f44d5a64351e 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -40,7 +40,7 @@ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
return -EINVAL;
}
- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+ strscpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
pr_warn("bus_id %s no slash\n", bus_id);
return -EINVAL;