Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/arm64/iort.c13
-rw-r--r--drivers/acpi/pci_mcfg.c7
-rw-r--r--drivers/ata/ahci_brcm.c46
-rw-r--r--drivers/clk/sifive/Kconfig2
-rw-r--r--drivers/clk/sifive/fu740-prci.c11
-rw-r--r--drivers/clk/sifive/fu740-prci.h2
-rw-r--r--drivers/clk/sifive/sifive-prci.c54
-rw-r--r--drivers/clk/sifive/sifive-prci.h13
-rw-r--r--drivers/clocksource/arm_arch_timer.c36
-rw-r--r--drivers/crypto/ccp/sev-dev.c193
-rw-r--r--drivers/crypto/ccp/sev-dev.h4
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/at_xdmac.c11
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c178
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h37
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c277
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c300
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h2
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c77
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.h4
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-regs.h291
-rw-r--r--drivers/dma/idxd/Makefile2
-rw-r--r--drivers/dma/idxd/cdev.c132
-rw-r--r--drivers/dma/idxd/device.c283
-rw-r--r--drivers/dma/idxd/dma.c77
-rw-r--r--drivers/dma/idxd/idxd.h168
-rw-r--r--drivers/dma/idxd/init.c485
-rw-r--r--drivers/dma/idxd/irq.c29
-rw-r--r--drivers/dma/idxd/perfmon.c662
-rw-r--r--drivers/dma/idxd/perfmon.h119
-rw-r--r--drivers/dma/idxd/registers.h120
-rw-r--r--drivers/dma/idxd/submit.c42
-rw-r--r--drivers/dma/idxd/sysfs.c776
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/qcom/gpi.c1
-rw-r--r--drivers/dma/qcom/hidma.c6
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c8
-rw-r--r--drivers/firmware/dmi_scan.c1
-rw-r--r--drivers/firmware/psci/psci.c2
-rw-r--r--drivers/firmware/smccc/Makefile2
-rw-r--r--drivers/firmware/smccc/kvm_guest.c50
-rw-r--r--drivers/firmware/smccc/smccc.c1
-rw-r--r--drivers/firmware/xilinx/zynqmp.c114
-rw-r--r--drivers/gpio/Kconfig24
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c50
-rw-r--r--drivers/gpio/gpio-aggregator.c39
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-it87.c8
-rw-r--r--drivers/gpio/gpio-mockup.c9
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c47
-rw-r--r--drivers/gpio/gpio-mxs.c5
-rw-r--r--drivers/gpio/gpio-omap.c5
-rw-r--r--drivers/gpio/gpio-realtek-otto.c325
-rw-r--r--drivers/gpio/gpio-regmap.c5
-rw-r--r--drivers/gpio/gpio-sch.c198
-rw-r--r--drivers/gpio/gpiolib-acpi.c21
-rw-r--r--drivers/gpio/gpiolib-acpi.h4
-rw-r--r--drivers/gpio/gpiolib-of.c6
-rw-r--r--drivers/gpio/gpiolib.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c193
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c129
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c25
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h31
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h16
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h41
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h40
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c378
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c55
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_request.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c113
-rw-r--r--drivers/gpu/drm/tegra/dc.h6
-rw-r--r--drivers/gpu/drm/tegra/drm.c27
-rw-r--r--drivers/gpu/drm/tegra/drm.h5
-rw-r--r--drivers/gpu/drm/tegra/fb.c10
-rw-r--r--drivers/gpu/drm/tegra/gem.h6
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c4
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/hub.c41
-rw-r--r--drivers/gpu/drm/tegra/plane.c32
-rw-r--r--drivers/gpu/drm/tegra/vic.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_memory.c2
-rw-r--r--drivers/gpu/host1x/bus.c31
-rw-r--r--drivers/gpu/host1x/cdma.c11
-rw-r--r--drivers/gpu/host1x/debug.c14
-rw-r--r--drivers/gpu/host1x/dev.c6
-rw-r--r--drivers/gpu/host1x/dev.h13
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c2
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c10
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x07_vm.h2
-rw-r--r--drivers/gpu/host1x/intr.c28
-rw-r--r--drivers/gpu/host1x/intr.h4
-rw-r--r--drivers/gpu/host1x/job.c5
-rw-r--r--drivers/gpu/host1x/syncpt.c202
-rw-r--r--drivers/gpu/host1x/syncpt.h4
-rw-r--r--drivers/hid/Kconfig20
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-debug.c1
-rw-r--r--drivers/hid/hid-elan.c17
-rw-r--r--drivers/hid/hid-ft260.c1054
-rw-r--r--drivers/hid/hid-ids.h14
-rw-r--r--drivers/hid/hid-input.c22
-rw-r--r--drivers/hid/hid-kye.c2
-rw-r--r--drivers/hid/hid-lenovo.c147
-rw-r--r--drivers/hid/hid-lg.c24
-rw-r--r--drivers/hid/hid-logitech-dj.c131
-rw-r--r--drivers/hid/hid-logitech-hidpp.c7
-rw-r--r--drivers/hid/hid-magicmouse.c158
-rw-r--r--drivers/hid/hid-picolcd_core.c5
-rw-r--r--drivers/hid/hid-plantronics.c60
-rw-r--r--drivers/hid/hid-quirks.c5
-rw-r--r--drivers/hid/hid-sensor-custom.c5
-rw-r--r--drivers/hid/hid-sensor-hub.c4
-rw-r--r--drivers/hid/hid-thrustmaster.c371
-rw-r--r--drivers/hid/hid-uclogic-params.c8
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c52
-rw-r--r--drivers/hid/surface-hid/Kconfig42
-rw-r--r--drivers/hid/surface-hid/Makefile7
-rw-r--r--drivers/hid/surface-hid/surface_hid.c253
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c272
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.h77
-rw-r--r--drivers/hid/surface-hid/surface_kbd.c300
-rw-r--r--drivers/hid/usbhid/hid-pidff.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c6
-rw-r--r--drivers/hid/usbhid/usbkbd.c18
-rw-r--r--drivers/hid/wacom_sys.c2
-rw-r--r--drivers/hid/wacom_wac.c50
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/hwspinlock/Kconfig11
-rw-r--r--drivers/hwspinlock/Makefile1
-rw-r--r--drivers/hwspinlock/sirf_hwspinlock.c105
-rw-r--r--drivers/hwtracing/coresight/Kconfig24
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c29
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c119
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c161
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h83
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c1157
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.h152
-rw-r--r--drivers/i2c/busses/Kconfig22
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c268
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c1
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c6
-rw-r--r--drivers/i2c/busses/i2c-cp2615.c330
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h8
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c155
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c61
-rw-r--r--drivers/i2c/busses/i2c-emev2.c5
-rw-r--r--drivers/i2c/busses/i2c-hisi.c504
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-icy.c32
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c4
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c6
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c28
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c5
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c16
-rw-r--r--drivers/i2c/busses/i2c-mpc.c579
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c17
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c4
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c8
-rw-r--r--drivers/i2c/busses/i2c-powermac.c5
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c4
-rw-r--r--drivers/i2c/busses/i2c-rcar.c89
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c9
-rw-r--r--drivers/i2c/busses/i2c-scmi.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c5
-rw-r--r--drivers/i2c/busses/i2c-sprd.c5
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c82
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c79
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c1
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-boardinfo.c11
-rw-r--r--drivers/i2c/i2c-core-base.c114
-rw-r--r--drivers/i2c/i2c-dev.c9
-rw-r--r--drivers/infiniband/core/cache.c87
-rw-r--r--drivers/infiniband/core/cm.c58
-rw-r--r--drivers/infiniband/core/cm_msgs.h4
-rw-r--r--drivers/infiniband/core/cma.c116
-rw-r--r--drivers/infiniband/core/cma_configfs.c8
-rw-r--r--drivers/infiniband/core/cma_priv.h10
-rw-r--r--drivers/infiniband/core/core_priv.h28
-rw-r--r--drivers/infiniband/core/counters.c62
-rw-r--r--drivers/infiniband/core/device.c37
-rw-r--r--drivers/infiniband/core/iwpm_msg.c3
-rw-r--r--drivers/infiniband/core/mad.c79
-rw-r--r--drivers/infiniband/core/mad_rmpp.c10
-rw-r--r--drivers/infiniband/core/multicast.c8
-rw-r--r--drivers/infiniband/core/nldev.c176
-rw-r--r--drivers/infiniband/core/opa_smi.h4
-rw-r--r--drivers/infiniband/core/rdma_core.c4
-rw-r--r--drivers/infiniband/core/restrack.c3
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c52
-rw-r--r--drivers/infiniband/core/rw.c25
-rw-r--r--drivers/infiniband/core/sa.h2
-rw-r--r--drivers/infiniband/core/sa_query.c22
-rw-r--r--drivers/infiniband/core/security.c8
-rw-r--r--drivers/infiniband/core/smi.c12
-rw-r--r--drivers/infiniband/core/smi.h4
-rw-r--r--drivers/infiniband/core/sysfs.c29
-rw-r--r--drivers/infiniband/core/ucma.c8
-rw-r--r--drivers/infiniband/core/umem.c8
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c4
-rw-r--r--drivers/infiniband/core/user_mad.c34
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c25
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c32
-rw-r--r--drivers/infiniband/core/verbs.c43
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig4
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c63
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h11
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h33
-rw-r--r--drivers/infiniband/hw/efa/efa.h14
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c10
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c14
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c8
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c10
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h5
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c2
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c6
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c1
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h20
-rw-r--r--drivers/infiniband/hw/hfi1/init.c7
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h15
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c13
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c71
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c128
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h2
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c2
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c12
-rw-r--r--drivers/infiniband/hw/hfi1/netdev.h39
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c172
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h18
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h179
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c12
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h1
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c8
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h5
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h2
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c114
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c92
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h91
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c55
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c2267
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h578
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c74
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c59
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c124
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hmc.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c6
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c2
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c16
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c46
-rw-r--r--drivers/infiniband/hw/mlx4/main.c47
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h26
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c101
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h3
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c8
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c10
-rw-r--r--drivers/infiniband/hw/mlx5/counters.h2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c64
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c587
-rw-r--r--drivers/infiniband/hw/mlx5/dm.h68
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c9
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c4
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h4
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c16
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c16
-rw-r--r--drivers/infiniband/hw/mlx5/main.c343
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h182
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c163
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c185
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c17
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c173
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h7
-rw-r--r--drivers/infiniband/hw/qedr/main.c8
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c9
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h11
-rw-r--r--drivers/infiniband/hw/qib/qib.h34
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h7
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c68
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h6
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h10
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c12
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h45
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c34
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.h11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h30
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c271
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c52
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c32
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h60
-rw-r--r--drivers/infiniband/sw/siw/iwarp.h13
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c19
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h5
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c26
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c48
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c122
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h1
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c20
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c36
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c35
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4
-rw-r--r--drivers/input/mouse/elantech.c6
-rw-r--r--drivers/iommu/Kconfig16
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd/init.c59
-rw-r--r--drivers/iommu/amd/iommu.c201
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c247
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h18
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c117
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c5
-rw-r--r--drivers/iommu/dma-iommu.c149
-rw-r--r--drivers/iommu/exynos-iommu.c7
-rw-r--r--drivers/iommu/fsl_pamu.c293
-rw-r--r--drivers/iommu/fsl_pamu.h12
-rw-r--r--drivers/iommu/fsl_pamu_domain.c693
-rw-r--r--drivers/iommu/fsl_pamu_domain.h46
-rw-r--r--drivers/iommu/intel/dmar.c72
-rw-r--r--drivers/iommu/intel/iommu.c233
-rw-r--r--drivers/iommu/intel/irq_remapping.c5
-rw-r--r--drivers/iommu/intel/pasid.c75
-rw-r--r--drivers/iommu/intel/pasid.h6
-rw-r--r--drivers/iommu/intel/svm.c82
-rw-r--r--drivers/iommu/io-pgfault.c461
-rw-r--r--drivers/iommu/iommu-sva-lib.h53
-rw-r--r--drivers/iommu/iommu.c161
-rw-r--r--drivers/iommu/iova.c96
-rw-r--r--drivers/iommu/ipmmu-vmsa.c6
-rw-r--r--drivers/iommu/msm_iommu.c5
-rw-r--r--drivers/iommu/mtk_iommu.c41
-rw-r--r--drivers/iommu/mtk_iommu_v1.c98
-rw-r--r--drivers/iommu/of_iommu.c5
-rw-r--r--drivers/iommu/omap-iommu.c5
-rw-r--r--drivers/iommu/rockchip-iommu.c5
-rw-r--r--drivers/iommu/s390-iommu.c4
-rw-r--r--drivers/iommu/sprd-iommu.c575
-rw-r--r--drivers/iommu/sun50i-iommu.c5
-rw-r--r--drivers/iommu/tegra-gart.c5
-rw-r--r--drivers/iommu/tegra-smmu.c5
-rw-r--r--drivers/iommu/virtio-iommu.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c18
-rw-r--r--drivers/leds/Kconfig7
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/blink/Kconfig33
-rw-r--r--drivers/leds/blink/Makefile2
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c2
-rw-r--r--drivers/leds/flash/Kconfig11
-rw-r--r--drivers/leds/flash/Makefile1
-rw-r--r--drivers/leds/flash/leds-rt4505.c430
-rw-r--r--drivers/leds/leds-lm3642.c4
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c2
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/macintosh/windfarm_core.c2
-rw-r--r--drivers/macintosh/windfarm_pm121.c2
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c2
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-clone-metadata.c6
-rw-r--r--drivers/md/dm-ebs-target.c6
-rw-r--r--drivers/md/dm-integrity.c85
-rw-r--r--drivers/md/dm-ioctl.c294
-rw-r--r--drivers/md/dm-raid.c44
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-snap-persistent.c6
-rw-r--r--drivers/md/dm-snap.c5
-rw-r--r--drivers/md/dm-table.c30
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c40
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm.c63
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c8
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c2
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h8
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c9
-rw-r--r--drivers/media/usb/uvc/uvc_video.c94
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h5
-rw-r--r--drivers/misc/uacce/uacce.c39
-rw-r--r--drivers/mtd/ubi/build.c1
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c1
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c14
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c4
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/controller/Kconfig17
-rw-r--r--drivers/pci/controller/Makefile8
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c24
-rw-r--r--drivers/pci/controller/dwc/Kconfig12
-rw-r--r--drivers/pci/controller/dwc/Makefile10
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c14
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c11
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h1
-rw-r--r--drivers/pci/controller/dwc/pcie-fu740.c309
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c108
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig3
-rw-r--r--drivers/pci/controller/pci-host-common.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c4
-rw-r--r--drivers/pci/controller/pci-tegra.c349
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c2
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c13
-rw-r--r--drivers/pci/controller/pci-xgene.c3
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c4
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c20
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c1027
-rw-r--r--drivers/pci/controller/pcie-mediatek.c7
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c12
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c355
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c7
-rw-r--r--drivers/pci/controller/pcie-xilinx.c246
-rw-r--r--drivers/pci/controller/vmd.c63
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c16
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c22
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c2
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c2
-rw-r--r--drivers/pci/hotplug/acpiphp.h3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c1
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c5
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c5
-rw-r--r--drivers/pci/msi.c45
-rw-r--r--drivers/pci/of.c22
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-label.c218
-rw-r--r--drivers/pci/pci-sysfs.c260
-rw-r--r--drivers/pci/pci.c48
-rw-r--r--drivers/pci/pci.h24
-rw-r--r--drivers/pci/pcie/aer.c6
-rw-r--r--drivers/pci/pcie/pme.c2
-rw-r--r--drivers/pci/pcie/rcec.c2
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/quirks.c29
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/vpd.c232
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/perf/arm_pmu.c30
-rw-r--r--drivers/pinctrl/Kconfig21
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/bcm/Kconfig62
-rw-r--r--drivers/pinctrl/bcm/Makefile7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6318.c498
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63268.c643
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6328.c404
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c369
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6362.c617
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6368.c523
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.c109
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.h43
-rw-r--r--drivers/pinctrl/core.c39
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sll.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8dxl.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mm.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mn.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mp.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qm.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qxp.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c5
-rw-r--r--drivers/pinctrl/mediatek/Kconfig6
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8195.c850
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c19
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h1669
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-cp110.c4
-rw-r--r--drivers/pinctrl/pinconf-generic.c6
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c8
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c16
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c1649
-rw-r--r--drivers/pinctrl/pinctrl-k210.c1
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c956
-rw-r--r--drivers/pinctrl/pinctrl-single.c71
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c4
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c906
-rw-r--r--drivers/pinctrl/pinmux.c106
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c24
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c123
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c7
-rw-r--r--drivers/pinctrl/renesas/core.c20
-rw-r--r--drivers/pinctrl/renesas/core.h8
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c48
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7740.c46
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7791.c387
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7792.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c31
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c31
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c35
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77990.c35
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c46
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c109
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h24
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c10
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c18
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c100
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_microb.c6
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/Makefile2
-rw-r--r--drivers/ptp/ptp_kvm_arm.c28
-rw-r--r--drivers/ptp/ptp_kvm_common.c (renamed from drivers/ptp/ptp_kvm.c)85
-rw-r--r--drivers/ptp/ptp_kvm_x86.c97
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c48
-rw-r--r--drivers/pwm/pwm-ab8500.c54
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c3
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c42
-rw-r--r--drivers/pwm/pwm-atmel.c30
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c5
-rw-r--r--drivers/pwm/pwm-bcm-kona.c8
-rw-r--r--drivers/pwm/pwm-bcm2835.c40
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-brcmstb.c1
-rw-r--r--drivers/pwm/pwm-clps711x.c1
-rw-r--r--drivers/pwm/pwm-crc.c1
-rw-r--r--drivers/pwm/pwm-cros-ec.c4
-rw-r--r--drivers/pwm/pwm-dwc.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c1
-rw-r--r--drivers/pwm/pwm-hibvt.c1
-rw-r--r--drivers/pwm/pwm-img.c1
-rw-r--r--drivers/pwm/pwm-imx-tpm.c5
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-imx27.c1
-rw-r--r--drivers/pwm/pwm-intel-lgm.c1
-rw-r--r--drivers/pwm/pwm-iqs620a.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c1
-rw-r--r--drivers/pwm/pwm-keembay.c1
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c5
-rw-r--r--drivers/pwm/pwm-lpc32xx.c5
-rw-r--r--drivers/pwm/pwm-lpss.c7
-rw-r--r--drivers/pwm/pwm-mediatek.c7
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/pwm-mtk-disp.c1
-rw-r--r--drivers/pwm/pwm-mxs.c1
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c1
-rw-r--r--drivers/pwm/pwm-pca9685.c303
-rw-r--r--drivers/pwm/pwm-pxa.c1
-rw-r--r--drivers/pwm/pwm-rcar.c1
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c1
-rw-r--r--drivers/pwm/pwm-rockchip.c1
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-sifive.c1
-rw-r--r--drivers/pwm/pwm-sl28cpld.c1
-rw-r--r--drivers/pwm/pwm-spear.c1
-rw-r--r--drivers/pwm/pwm-sprd.c4
-rw-r--r--drivers/pwm/pwm-sti.c7
-rw-r--r--drivers/pwm/pwm-stm32-lp.c1
-rw-r--r--drivers/pwm/pwm-stm32.c1
-rw-r--r--drivers/pwm/pwm-stmpe.c1
-rw-r--r--drivers/pwm/pwm-sun4i.c1
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/pwm-tiecap.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c1
-rw-r--r--drivers/pwm/pwm-twl-led.c1
-rw-r--r--drivers/pwm/pwm-twl.c1
-rw-r--r--drivers/pwm/pwm-visconti.c190
-rw-r--r--drivers/pwm/pwm-vt8500.c1
-rw-r--r--drivers/remoteproc/Kconfig7
-rw-r--r--drivers/remoteproc/imx_rproc.c322
-rw-r--r--drivers/remoteproc/ingenic_rproc.c2
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c2
-rw-r--r--drivers/remoteproc/mtk_scp.c6
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/pru_rproc.c47
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c26
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c19
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c599
-rw-r--r--drivers/remoteproc/qcom_wcnss.c10
-rw-r--r--drivers/remoteproc/remoteproc_cdev.c21
-rw-r--r--drivers/remoteproc/remoteproc_core.c337
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c8
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c2
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c21
-rw-r--r--drivers/remoteproc/remoteproc_internal.h12
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c21
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c205
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c2
-rw-r--r--drivers/reset/Kconfig1
-rw-r--r--drivers/reset/core.c215
-rw-r--r--drivers/rpmsg/qcom_glink_native.c17
-rw-r--r--drivers/rpmsg/qcom_smd.c16
-rw-r--r--drivers/rpmsg/rpmsg_char.c11
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c62
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/interface.c34
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c135
-rw-r--r--drivers/rtc/rtc-ds1307.c56
-rw-r--r--drivers/rtc/rtc-ds1511.c6
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c1
-rw-r--r--drivers/rtc/rtc-imx-sc.c11
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-m48t59.c2
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-omap.c5
-rw-r--r--drivers/rtc/rtc-pcf85063.c7
-rw-r--r--drivers/rtc/rtc-pcf8523.c196
-rw-r--r--drivers/rtc/rtc-pm8xxx.c11
-rw-r--r--drivers/rtc/rtc-rv3028.c4
-rw-r--r--drivers/rtc/rtc-rx6110.c7
-rw-r--r--drivers/rtc/rtc-s5m.c6
-rw-r--r--drivers/rtc/rtc-spear.c6
-rw-r--r--drivers/rtc/rtc-tps65910.c1
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/scsi/cxlflash/main.c3
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c55
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/staging/media/tegra-video/vi.c6
-rw-r--r--drivers/thermal/amlogic_thermal.c4
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c1
-rw-r--r--drivers/thermal/cpufreq_cooling.c49
-rw-r--r--drivers/thermal/cpuidle_cooling.c37
-rw-r--r--drivers/thermal/devfreq_cooling.c25
-rw-r--r--drivers/thermal/gov_fair_share.c11
-rw-r--r--drivers/thermal/gov_power_allocator.c32
-rw-r--r--drivers/thermal/hisi_thermal.c10
-rw-r--r--drivers/thermal/intel/Kconfig11
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c129
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c91
-rw-r--r--drivers/thermal/qcom/tsens-8960.c235
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c98
-rw-r--r--drivers/thermal/qcom/tsens-v1.c4
-rw-r--r--drivers/thermal/qcom/tsens.c165
-rw-r--r--drivers/thermal/qcom/tsens.h6
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c3
-rw-r--r--drivers/thermal/sun8i_thermal.c4
-rw-r--r--drivers/thermal/tegra/soctherm.c15
-rw-r--r--drivers/thermal/thermal_core.c57
-rw-r--r--drivers/thermal/thermal_core.h1
-rw-r--r--drivers/thermal/thermal_helpers.c27
-rw-r--r--drivers/thermal/thermal_mmio.c5
-rw-r--r--drivers/thermal/thermal_of.c7
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c37
-rw-r--r--drivers/vdpa/Kconfig15
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c24
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h26
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c86
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c85
-rw-r--r--drivers/vdpa/vdpa.c12
-rw-r--r--drivers/vdpa/vdpa_sim/Makefile1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c127
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c338
-rw-r--r--drivers/vdpa/virtio_pci/Makefile2
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c484
-rw-r--r--drivers/vfio/vfio_iommu_type1.c31
-rw-r--r--drivers/vhost/vdpa.c26
-rw-r--r--drivers/vhost/vringh.c69
-rw-r--r--drivers/video/fbdev/efifb.c6
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c43
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c27
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c67
-rw-r--r--drivers/xen/swiotlb-xen.c182
879 files changed, 37849 insertions, 12966 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 8f3fee8281ad..5a6d613e868d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
obj-y += soc/
obj-$(CONFIG_VIRTIO) += virtio/
+obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio/
obj-$(CONFIG_VDPA) += vdpa/
obj-$(CONFIG_XEN) += xen/
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 2494138a6905..3912a1f6058e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -968,15 +968,16 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
static void iort_named_component_init(struct device *dev,
struct acpi_iort_node *node)
{
+ struct property_entry props[2] = {};
struct acpi_iort_named_component *nc;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec)
- return;
nc = (struct acpi_iort_named_component *)node->node_data;
- fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
- nc->node_flags);
+ props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
+ FIELD_GET(ACPI_IORT_NC_PASID_BITS,
+ nc->node_flags));
+
+ if (device_add_properties(dev, props))
+ dev_warn(dev, "Could not add device properties\n");
}
static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
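The hunk above stops poking the PASID width directly into the IOMMU fwspec and instead publishes it as a generic "pasid-num-bits" device property, which any interested driver can read back through the unified property API. A minimal consumer-side sketch, assuming a device that actually carries the property:

#include <linux/device.h>
#include <linux/property.h>

/* Sketch: how an IOMMU driver could consume the property added above. */
static int example_read_pasid_bits(struct device *dev)
{
	u32 pasid_bits;

	if (device_property_read_u32(dev, "pasid-num-bits", &pasid_bits))
		return -ENODEV;	/* property absent: no PASID support */

	dev_info(dev, "device supports %u PASID bits\n", pasid_bits);
	return 0;
}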
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 95f23acd5b80..53cab975f612 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -116,6 +116,13 @@ static struct mcfg_fixup mcfg_quirks[] = {
THUNDER_ECAM_QUIRK(2, 12),
THUNDER_ECAM_QUIRK(2, 13),
+ { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops},
+ { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops},
+
#define XGENE_V1_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v1_pcie_ecam_ops }
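Each mcfg_fixup entry matches the MCFG table's OEM ID, table ID and revision plus a PCI segment and bus range, and swaps in the listed config-space accessors. The six TEGRA194 rows differ only in the segment number; a helper macro in the style of the XGENE_V1_ECAM_MCFG macro right below them could collapse the repetition. A hypothetical tidy-up, not part of the patch:

#define TEGRA194_ECAM_MCFG(seg) \
	{ "NVIDIA", "TEGRA194", 1, seg, MCFG_BUS_ANY, &tegra194_pcie_ops }

	TEGRA194_ECAM_MCFG(0),
	TEGRA194_ECAM_MCFG(1),
	TEGRA194_ECAM_MCFG(2),
	TEGRA194_ECAM_MCFG(3),
	TEGRA194_ECAM_MCFG(4),
	TEGRA194_ECAM_MCFG(5),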
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5b32df5d33ad..6e9c5ade4c2e 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -86,7 +86,8 @@ struct brcm_ahci_priv {
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
- struct reset_control *rcdev;
+ struct reset_control *rcdev_rescal;
+ struct reset_control *rcdev_ahci;
};
static inline u32 brcm_sata_readreg(void __iomem *addr)
@@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
else
ret = 0;
- if (priv->version != BRCM_SATA_BCM7216)
- reset_control_assert(priv->rcdev);
+ reset_control_assert(priv->rcdev_ahci);
+ reset_control_rearm(priv->rcdev_rescal);
return ret;
}
@@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret = 0;
- if (priv->version == BRCM_SATA_BCM7216)
- ret = reset_control_reset(priv->rcdev);
- else
- ret = reset_control_deassert(priv->rcdev);
+ ret = reset_control_deassert(priv->rcdev_ahci);
+ if (ret)
+ return ret;
+ ret = reset_control_reset(priv->rcdev_rescal);
if (ret)
return ret;
@@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
- const char *reset_name = NULL;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
@@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- /* Reset is optional depending on platform and named differently */
- if (priv->version == BRCM_SATA_BCM7216)
- reset_name = "rescal";
- else
- reset_name = "ahci";
-
- priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
- if (IS_ERR(priv->rcdev))
- return PTR_ERR(priv->rcdev);
+ if (priv->version == BRCM_SATA_BCM7216) {
+ priv->rcdev_rescal = devm_reset_control_get_optional_shared(
+ &pdev->dev, "rescal");
+ if (IS_ERR(priv->rcdev_rescal))
+ return PTR_ERR(priv->rcdev_rescal);
+ }
+ priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
+ if (IS_ERR(priv->rcdev_ahci))
+ return PTR_ERR(priv->rcdev_ahci);
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
@@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
break;
}
- if (priv->version == BRCM_SATA_BCM7216)
- ret = reset_control_reset(priv->rcdev);
- else
- ret = reset_control_deassert(priv->rcdev);
+ ret = reset_control_reset(priv->rcdev_rescal);
+ if (ret)
+ return ret;
+ ret = reset_control_deassert(priv->rcdev_ahci);
if (ret)
return ret;
@@ -539,8 +539,8 @@ out_disable_regulators:
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
- if (priv->version != BRCM_SATA_BCM7216)
- reset_control_assert(priv->rcdev);
+ reset_control_assert(priv->rcdev_ahci);
+ reset_control_rearm(priv->rcdev_rescal);
return ret;
}
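The rework above splits the ahci_brcm reset handling in two: "rescal" becomes an optional shared pulsed reset — fired with reset_control_reset() and returned with reset_control_rearm() so the shared trigger count stays balanced — while "ahci" remains a conventional assert/deassert line. The same pairing in miniature, for a hypothetical driver (function and variable names are illustrative):

/* Power-up: pulse the shared reset once, then release the private one. */
static int example_power_on(struct reset_control *rc_shared,
			    struct reset_control *rc_priv)
{
	int ret;

	ret = reset_control_reset(rc_shared);
	if (ret)
		return ret;

	ret = reset_control_deassert(rc_priv);
	if (ret)
		reset_control_rearm(rc_shared);	/* undo our trigger on failure */

	return ret;
}

/* Power-down: assert the private reset, re-arm the shared pulse. */
static void example_power_off(struct reset_control *rc_shared,
			      struct reset_control *rc_priv)
{
	reset_control_assert(rc_priv);
	reset_control_rearm(rc_shared);
}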
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
index 1c14eb20c066..9132c3c4aa86 100644
--- a/drivers/clk/sifive/Kconfig
+++ b/drivers/clk/sifive/Kconfig
@@ -10,6 +10,8 @@ if CLK_SIFIVE
config CLK_SIFIVE_PRCI
bool "PRCI driver for SiFive SoCs"
+ select RESET_CONTROLLER
+ select RESET_SIMPLE
select CLK_ANALOGBITS_WRPLL_CLN28HPC
help
Supports the Power Reset Clock interface (PRCI) IP block found in
diff --git a/drivers/clk/sifive/fu740-prci.c b/drivers/clk/sifive/fu740-prci.c
index 764d1097aa51..53f6e00a03b9 100644
--- a/drivers/clk/sifive/fu740-prci.c
+++ b/drivers/clk/sifive/fu740-prci.c
@@ -72,6 +72,12 @@ static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
.recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
};
+static const struct clk_ops sifive_fu740_prci_pcie_aux_clk_ops = {
+ .enable = sifive_prci_pcie_aux_clock_enable,
+ .disable = sifive_prci_pcie_aux_clock_disable,
+ .is_enabled = sifive_prci_pcie_aux_clock_is_enabled,
+};
+
/* List of clock controls provided by the PRCI */
struct __prci_clock __prci_init_clocks_fu740[] = {
[PRCI_CLK_COREPLL] = {
@@ -120,4 +126,9 @@ struct __prci_clock __prci_init_clocks_fu740[] = {
.parent_name = "hfpclkpll",
.ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
},
+ [PRCI_CLK_PCIE_AUX] = {
+ .name = "pcie_aux",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_pcie_aux_clk_ops,
+ },
};
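With the gate registered under the name "pcie_aux", the FU740 PCIe host driver can drive it through the ordinary consumer API; clk_prepare_enable() ultimately lands in the .enable op added above. A hedged consumer-side sketch (device-tree wiring and teardown elided):

#include <linux/clk.h>

static int example_enable_pcie_aux(struct device *dev)
{
	struct clk *aux = devm_clk_get(dev, "pcie_aux");

	if (IS_ERR(aux))
		return PTR_ERR(aux);

	return clk_prepare_enable(aux);
}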
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
index 13ef971f7764..511a0bf7ba2b 100644
--- a/drivers/clk/sifive/fu740-prci.h
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -9,7 +9,7 @@
#include "sifive-prci.h"
-#define NUM_CLOCK_FU740 8
+#define NUM_CLOCK_FU740 9
extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index 1490b01ce629..0d79ba31a793 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -453,6 +453,47 @@ void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}
+/* PCIE AUX clock APIs for enable, disable. */
+int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);
+
+ if (r & PRCI_PCIE_AUX_EN_MASK)
+ return 1;
+ else
+ return 0;
+}
+
+int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 r __maybe_unused;
+
+ if (sifive_prci_pcie_aux_clock_is_enabled(hw))
+ return 0;
+
+ __prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
+ r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
+
+ return 0;
+}
+
+void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 r __maybe_unused;
+
+ __prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
+ r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
+
+}
+
/**
* __prci_register_clocks() - register clock controls in the PRCI
* @dev: Linux struct device
@@ -547,6 +588,19 @@ static int sifive_prci_probe(struct platform_device *pdev)
if (IS_ERR(pd->va))
return PTR_ERR(pd->va);
+ pd->reset.rcdev.owner = THIS_MODULE;
+ pd->reset.rcdev.nr_resets = PRCI_RST_NR;
+ pd->reset.rcdev.ops = &reset_simple_ops;
+ pd->reset.rcdev.of_node = pdev->dev.of_node;
+ pd->reset.active_low = true;
+ pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
+ spin_lock_init(&pd->reset.lock);
+
+ r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
+ if (r) {
+ dev_err(dev, "could not register reset controller: %d\n", r);
+ return r;
+ }
r = __prci_register_clocks(dev, pd, desc);
if (r) {
dev_err(dev, "could not register clocks: %d\n", r);
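The probe hunk reuses the generic reset-simple helper, so the PRCI's DEVICESRESETREG bank is exposed as a seven-line, active-low reset controller with no bespoke ops. A consumer then uses the standard reset API — a sketch, where "pcie_power_up" stands in for whatever name the consumer's reset-names DT property actually uses:

#include <linux/reset.h>

static int example_release_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, "pcie_power_up");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* reset-simple flips the DEVICESRESETREG bit for this line */
	return reset_control_deassert(rst);
}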
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
index dbdbd1722688..91658a88af4e 100644
--- a/drivers/clk/sifive/sifive-prci.h
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -11,6 +11,7 @@
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <linux/clk-provider.h>
+#include <linux/reset/reset-simple.h>
#include <linux/platform_device.h>
/*
@@ -67,6 +68,11 @@
#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
+/* PCIEAUX */
+#define PRCI_PCIE_AUX_OFFSET 0x14
+#define PRCI_PCIE_AUX_EN_SHIFT 0
+#define PRCI_PCIE_AUX_EN_MASK (0x1 << PRCI_PCIE_AUX_EN_SHIFT)
+
/* GEMGXLPLLCFG0 */
#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
@@ -116,6 +122,8 @@
#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_MASK \
(0x1 << PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT)
+#define PRCI_RST_NR 7
+
/* CLKMUXSTATUSREG */
#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
@@ -216,6 +224,7 @@
*/
struct __prci_data {
void __iomem *va;
+ struct reset_simple_data reset;
struct clk_hw_onecell_data hw_clks;
};
@@ -296,4 +305,8 @@ unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate);
+int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw);
+int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw);
+void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw);
+
#endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4fb1f4da27ec..fe1a82627d57 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -16,6 +16,7 @@
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/clocksource_ids.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -24,6 +25,8 @@
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/ptp_kvm.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -200,6 +203,7 @@ static u64 arch_counter_read_cc(const struct cyclecounter *cc)
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
+ .id = CSID_ARM_ARCH_COUNTER,
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
@@ -1676,3 +1680,35 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif
+
+int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
+ struct clocksource **cs)
+{
+ struct arm_smccc_res hvc_res;
+ u32 ptp_counter;
+ ktime_t ktime;
+
+ if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY))
+ return -EOPNOTSUPP;
+
+ if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
+ ptp_counter = KVM_PTP_VIRT_COUNTER;
+ else
+ ptp_counter = KVM_PTP_PHYS_COUNTER;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
+ ptp_counter, &hvc_res);
+
+ if ((int)(hvc_res.a0) < 0)
+ return -EOPNOTSUPP;
+
+ ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1;
+ *ts = ktime_to_timespec64(ktime);
+ if (cycle)
+ *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
+ if (cs)
+ *cs = &clocksource_counter;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);
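kvm_arch_ptp_get_crosststamp() is the arm64 backend for the generic KVM PTP clock: one SMCCC hypercall hands back the host wall-clock time in a0/a1 and the matching guest counter reading in a2/a3, so a cross-timestamp needs no interpolation. Roughly how the generic ptp_kvm code consumes it — a sketch, not the verbatim caller in drivers/ptp/ptp_kvm_common.c:

static int example_get_crosststamp(void)
{
	struct clocksource *cs;
	struct timespec64 ts;
	u64 cycle;
	int ret;

	ret = kvm_arch_ptp_get_crosststamp(&cycle, &ts, &cs);
	if (ret)
		return ret;

	/* cycle is a raw reading of *cs (the arch counter here), taken
	 * at the same instant the host captured the time in ts. */
	pr_info("host %lld.%09ld @ counter %llu via %s\n",
		(long long)ts.tv_sec, ts.tv_nsec, cycle, cs->name);
	return 0;
}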
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index da3872c48308..3506b2050fb8 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -130,6 +130,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
+ case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
default: return 0;
}
@@ -142,6 +143,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
struct sev_device *sev;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
+ int buf_len;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -151,15 +153,27 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev = psp->sev_data;
+ buf_len = sev_cmd_buffer_len(cmd);
+ if (WARN_ON_ONCE(!data != !buf_len))
+ return -EINVAL;
+
+ /*
+ * Copy the incoming data to driver's scratch buffer as __pa() will not
+ * work for some memory, e.g. vmalloc'd addresses, and @data may not be
+ * physically contiguous.
+ */
+ if (data)
+ memcpy(sev->cmd_buf, data, buf_len);
+
/* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+ phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, psp_timeout);
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
+ buf_len, false);
iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
@@ -195,7 +209,14 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
+ buf_len, false);
+
+ /*
+ * Copy potential output from the PSP back to data. Do this even on
+ * failure in case the caller wants to glean something from the error.
+ */
+ if (data)
+ memcpy(data, sev->cmd_buf, buf_len);
return ret;
}
@@ -214,6 +235,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
+ struct sev_data_init data;
struct sev_device *sev;
int rc = 0;
@@ -225,6 +247,7 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
+ memset(&data, 0, sizeof(data));
if (sev_es_tmr) {
u64 tmr_pa;
@@ -234,12 +257,12 @@ static int __sev_platform_init_locked(int *error)
*/
tmr_pa = __pa(sev_es_tmr);
- sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
- sev->init_cmd_buf.tmr_address = tmr_pa;
- sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
+ data.flags |= SEV_INIT_FLAGS_SEV_ES;
+ data.tmr_address = tmr_pa;
+ data.tmr_len = SEV_ES_TMR_SIZE;
}
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
if (rc)
return rc;
@@ -296,15 +319,14 @@ static int sev_platform_shutdown(int *error)
static int sev_get_platform_state(int *state, int *error)
{
- struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status data;
int rc;
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &sev->status_cmd_buf, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
if (rc)
return rc;
- *state = sev->status_cmd_buf.state;
+ *state = data.state;
return rc;
}
@@ -342,15 +364,14 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
- struct sev_device *sev = psp_master->sev_data;
- struct sev_user_data_status *data = &sev->status_cmd_buf;
+ struct sev_user_data_status data;
int ret;
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error);
if (ret)
return ret;
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ if (copy_to_user((void __user *)argp->data, &data, sizeof(data)))
ret = -EFAULT;
return ret;
@@ -377,7 +398,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
+ struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
int ret;
@@ -388,9 +409,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
/* userspace wants to query CSR length */
if (!input.address || !input.length)
@@ -398,19 +417,15 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
/* allocate a physically contiguous buffer to store the CSR blob */
input_address = (void __user *)input.address;
- if (input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
+ if (input.length > SEV_FW_BLOB_MAX_SIZE)
+ return -EFAULT;
blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
+ if (!blob)
+ return -ENOMEM;
- data->address = __psp_pa(blob);
- data->len = input.length;
+ data.address = __psp_pa(blob);
+ data.len = input.length;
cmd:
if (sev->state == SEV_STATE_UNINIT) {
@@ -419,10 +434,10 @@ cmd:
goto e_free_blob;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error);
/* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
+ input.length = data.len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
@@ -436,8 +451,6 @@ cmd:
e_free_blob:
kfree(blob);
-e_free:
- kfree(data);
return ret;
}
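
The PEK_CSR path above supports a query-then-fetch convention: a caller that passes no buffer (or too small a length) gets the required length written back and retries with a real allocation. A stand-alone C model of that two-call protocol; the struct layout and behavior here are assumptions for illustration, not the SEV UAPI.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct csr_req { void *address; unsigned int length; };

/* Model of the driver: if the buffer is missing or too small,
 * report the needed size through the same length field. */
static int get_csr(struct csr_req *req)
{
	static const char csr[] = "-----BEGIN CERTIFICATE REQUEST-----";

	if (!req->address || req->length < sizeof(csr)) {
		req->length = sizeof(csr);  /* tell the caller what to allocate */
		return -1;
	}
	memcpy(req->address, csr, sizeof(csr));
	return 0;
}

int main(void)
{
	struct csr_req req = { 0 };         /* first call: query the length */

	get_csr(&req);

	req.address = malloc(req.length);   /* second call: fetch the blob */
	if (req.address && get_csr(&req) == 0)
		printf("%s\n", (char *)req.address);
	free(req.address);
	return 0;
}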
@@ -457,21 +470,20 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_user_data_status *status;
+ struct sev_user_data_status status;
int error = 0, ret;
- status = &sev->status_cmd_buf;
- ret = sev_platform_status(status, &error);
+ ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
"SEV: failed to get status. Error: %#x\n", error);
return 1;
}
- sev->api_major = status->api_major;
- sev->api_minor = status->api_minor;
- sev->build = status->build;
- sev->state = status->state;
+ sev->api_major = status.api_major;
+ sev->api_minor = status.api_minor;
+ sev->build = status.build;
+ sev->state = status.state;
return 0;
}
@@ -569,7 +581,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
+ struct sev_data_pek_cert_import data;
void *pek_blob, *oca_blob;
int ret;
@@ -579,19 +591,14 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
/* copy PEK certificate blobs from userspace */
pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
+ if (IS_ERR(pek_blob))
+ return PTR_ERR(pek_blob);
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
+ data.reserved = 0;
+ data.pek_cert_address = __psp_pa(pek_blob);
+ data.pek_cert_len = input.pek_cert_len;
/* copy PEK certificate blobs from userspace */
oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
@@ -600,8 +607,8 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
goto e_free_pek;
}
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
+ data.oca_cert_address = __psp_pa(oca_blob);
+ data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
if (sev->state != SEV_STATE_INIT) {
@@ -610,21 +617,19 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
goto e_free_oca;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
-e_free:
- kfree(data);
return ret;
}
static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
+ struct sev_data_get_id data;
void __user *input_address;
void *id_blob = NULL;
int ret;
@@ -638,28 +643,25 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
input_address = (void __user *)input.address;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
if (input.address && input.length) {
id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
+ if (!id_blob)
return -ENOMEM;
- }
- data->address = __psp_pa(id_blob);
- data->len = input.length;
+ data.address = __psp_pa(id_blob);
+ data.len = input.length;
+ } else {
+ data.address = 0;
+ data.len = 0;
}
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);
/*
* Firmware will return the length of the ID value (either the minimum
* required length or the actual length written), return it to the user.
*/
- input.length = data->len;
+ input.length = data.len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
@@ -667,7 +669,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
}
if (id_blob) {
- if (copy_to_user(input_address, id_blob, data->len)) {
+ if (copy_to_user(input_address, id_blob, data.len)) {
ret = -EFAULT;
goto e_free;
}
@@ -675,7 +677,6 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
e_free:
kfree(id_blob);
- kfree(data);
return ret;
}
@@ -725,7 +726,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pdh_cert_export input;
void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
+ struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
int ret;
@@ -743,9 +744,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
@@ -757,25 +756,19 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
input_cert_chain_address = (void __user *)input.cert_chain_address;
/* Allocate a physically contiguous buffer to store the PDH blob. */
- if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
+ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
+ return -EFAULT;
/* Allocate a physically contiguous buffer to store the cert chain blob. */
- if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
+ if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE)
+ return -EFAULT;
pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
+ if (!pdh_blob)
+ return -ENOMEM;
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
+ data.pdh_cert_address = __psp_pa(pdh_blob);
+ data.pdh_cert_len = input.pdh_cert_len;
cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
if (!cert_blob) {
@@ -783,15 +776,15 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
goto e_free_pdh;
}
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
+ data.cert_chain_address = __psp_pa(cert_blob);
+ data.cert_chain_len = input.cert_chain_len;
cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
+ input.cert_chain_len = data.cert_chain_len;
+ input.pdh_cert_len = data.pdh_cert_len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
@@ -816,8 +809,6 @@ e_free_cert:
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
-e_free:
- kfree(data);
return ret;
}
@@ -982,6 +973,10 @@ int sev_dev_init(struct psp_device *psp)
if (!sev)
goto e_err;
+ sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!sev->cmd_buf)
+ goto e_sev;
+
psp->sev_data = sev;
sev->dev = dev;
@@ -993,7 +988,7 @@ int sev_dev_init(struct psp_device *psp)
if (!sev->vdata) {
ret = -ENODEV;
dev_err(dev, "sev: missing driver data\n");
- goto e_err;
+ goto e_buf;
}
psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
@@ -1008,6 +1003,10 @@ int sev_dev_init(struct psp_device *psp)
e_irq:
psp_clear_sev_irq_handler(psp);
+e_buf:
+ devm_free_pages(dev, (unsigned long)sev->cmd_buf);
+e_sev:
+ devm_kfree(dev, sev);
e_err:
psp->sev_data = NULL;
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index dd5c4fe82914..666c21eb81ab 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -46,12 +46,12 @@ struct sev_device {
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
u8 api_major;
u8 api_minor;
u8 build;
+
+ void *cmd_buf;
};
int sev_dev_init(struct psp_device *psp);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a0836ffc22e0..6ab9d9a488a6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,18 @@ config INTEL_IDXD_SVM
depends on PCI_PASID
depends on PCI_IOV
+config INTEL_IDXD_PERFMON
+ bool "Intel Data Accelerators performance monitor support"
+ depends on INTEL_IDXD
+ help
+ Enable performance monitor (PMU) support for the Intel(R)
+ data accelerators present in Intel Xeon CPUs. With this

+ enabled, perf can be used to monitor the DSA (Intel Data
+ Streaming Accelerator) events described in the Intel DSA
+ spec.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index fe45ad5d06c4..64a52bf4d737 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -344,17 +344,6 @@ static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}
-static inline int at_xdmac_csize(u32 maxburst)
-{
- int csize;
-
- csize = ffs(maxburst) - 1;
- if (csize > 4)
- csize = -EINVAL;
-
- return csize;
-};
-
static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 08d71dafa001..53289927dd0d 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -81,8 +81,13 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
* - Even chunks originate CB equal to 1
*/
chunk->cb = !(desc->chunks_alloc % 2);
- chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
- chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
+ if (chan->dir == EDMA_DIR_WRITE) {
+ chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
+ chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+ } else {
+ chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
+ chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+ }
if (desc->chunk) {
/* Create and add new element into the linked list */
@@ -329,22 +334,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
- u32 cnt;
+ u32 cnt = 0;
int i;
if (!chan->configured)
return NULL;
switch (chan->config.direction) {
- case DMA_DEV_TO_MEM: /* local dma */
+ case DMA_DEV_TO_MEM: /* local DMA */
if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
break;
return NULL;
- case DMA_MEM_TO_DEV: /* local dma */
+ case DMA_MEM_TO_DEV: /* local DMA */
if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
break;
return NULL;
- default: /* remote dma */
+ default: /* remote DMA */
if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
break;
if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
@@ -352,12 +357,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
return NULL;
- } else {
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
if (xfer->xfer.sg.len < 1)
return NULL;
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ if (!xfer->xfer.il->numf)
+ return NULL;
+ if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ return NULL;
+ } else {
+ return NULL;
}
desc = dw_edma_alloc_desc(chan);
@@ -368,18 +380,28 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (unlikely(!chunk))
goto err_alloc;
- src_addr = chan->config.src_addr;
- dst_addr = chan->config.dst_addr;
+ if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ src_addr = xfer->xfer.il->src_start;
+ dst_addr = xfer->xfer.il->dst_start;
+ } else {
+ src_addr = chan->config.src_addr;
+ dst_addr = chan->config.dst_addr;
+ }
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
- } else {
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ if (xfer->xfer.il->numf > 0)
+ cnt = xfer->xfer.il->numf;
+ else
+ cnt = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
- if (!xfer->cyclic && !sg)
+ if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
break;
if (chunk->bursts_alloc == chan->ll_max) {
@@ -392,20 +414,23 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (unlikely(!burst))
goto err_alloc;
- if (xfer->cyclic)
+ if (xfer->type == EDMA_XFER_CYCLIC)
burst->sz = xfer->xfer.cyclic.len;
- else
+ else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
+ else if (xfer->type == EDMA_XFER_INTERLEAVED)
+ burst->sz = xfer->xfer.il->sgl[i].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
if (chan->dir == EDMA_DIR_WRITE) {
burst->sar = src_addr;
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
- } else {
- burst->dar = dst_addr;
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+ src_addr += sg_dma_len(sg);
+ burst->dar = sg_dma_address(sg);
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -416,10 +441,11 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
}
} else {
burst->dar = dst_addr;
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
burst->sar = xfer->xfer.cyclic.paddr;
- } else {
- burst->sar = src_addr;
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+ dst_addr += sg_dma_len(sg);
+ burst->sar = sg_dma_address(sg);
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -430,10 +456,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
}
}
- if (!xfer->cyclic) {
- src_addr += sg_dma_len(sg);
- dst_addr += sg_dma_len(sg);
+ if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
+ xfer->xfer.il->frame_size > 0) {
+ struct dma_interleaved_template *il = xfer->xfer.il;
+ struct data_chunk *dc = &il->sgl[i];
+
+ if (il->src_sgl) {
+ src_addr += burst->sz;
+ src_addr += dmaengine_get_src_icg(il, dc);
+ }
+
+ if (il->dst_sgl) {
+ dst_addr += burst->sz;
+ dst_addr += dmaengine_get_dst_icg(il, dc);
+ }
}
}
@@ -459,7 +497,7 @@ dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
xfer.xfer.sg.sgl = sgl;
xfer.xfer.sg.len = len;
xfer.flags = flags;
- xfer.cyclic = false;
+ xfer.type = EDMA_XFER_SCATTER_GATHER;
return dw_edma_device_transfer(&xfer);
}
@@ -478,7 +516,23 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
xfer.xfer.cyclic.len = len;
xfer.xfer.cyclic.cnt = count;
xfer.flags = flags;
- xfer.cyclic = true;
+ xfer.type = EDMA_XFER_CYCLIC;
+
+ return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
+ struct dma_interleaved_template *ilt,
+ unsigned long flags)
+{
+ struct dw_edma_transfer xfer;
+
+ xfer.dchan = dchan;
+ xfer.direction = ilt->dir;
+ xfer.xfer.il = ilt;
+ xfer.flags = flags;
+ xfer.type = EDMA_XFER_INTERLEAVED;
return dw_edma_device_transfer(&xfer);
}
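
For interleaved transfers, the loop above advances the source or destination address only on the side the template marks as scattered, stepping by the burst size plus the inter-chunk gap. A runnable sketch of that address walk, with the gap folded into a per-chunk icg field rather than going through the dmaengine_get_src_icg()/dmaengine_get_dst_icg() helpers (a simplification for illustration, not the dmaengine API):

#include <stdint.h>
#include <stdio.h>

struct chunk { unsigned size; unsigned icg; };  /* bytes to move, gap after */

/* Walk one frame of an interleaved template the way the driver builds
 * bursts: advance an address by the chunk size plus the inter-chunk gap,
 * but only on the side described as scattered (model of src_sgl/dst_sgl). */
static void walk_frame(uint64_t src, uint64_t dst, int src_sgl, int dst_sgl,
		       const struct chunk *sgl, int frame_size)
{
	for (int i = 0; i < frame_size; i++) {
		printf("burst %d: src=%#llx dst=%#llx sz=%u\n", i,
		       (unsigned long long)src, (unsigned long long)dst,
		       sgl[i].size);
		if (src_sgl)
			src += sgl[i].size + sgl[i].icg;
		if (dst_sgl)
			dst += sgl[i].size + sgl[i].icg;
	}
}

int main(void)
{
	const struct chunk sgl[] = { {256, 0}, {256, 768}, {512, 0} };

	walk_frame(0x1000, 0x80000, 1, 0, sgl, 3);  /* scattered source only */
	return 0;
}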
@@ -642,24 +696,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
struct device *dev = chip->dev;
struct dw_edma *dw = chip->dw;
struct dw_edma_chan *chan;
- size_t ll_chunk, dt_chunk;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 i, j, cnt, ch_cnt;
u32 alloc, off_alloc;
+ u32 i, j, cnt;
int err = 0;
u32 pos;
- ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
- ll_chunk = dw->ll_region.sz;
- dt_chunk = dw->dt_region.sz;
-
- /* Calculate linked list chunk for each channel */
- ll_chunk /= roundup_pow_of_two(ch_cnt);
-
- /* Calculate linked list chunk for each channel */
- dt_chunk /= roundup_pow_of_two(ch_cnt);
-
if (write) {
i = 0;
cnt = dw->wr_ch_cnt;
@@ -691,14 +734,14 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- chan->ll_off = (ll_chunk * i);
- chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
-
- chan->dt_off = (dt_chunk * i);
+ if (write)
+ chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+ else
+ chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max -= 1;
- dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
- write ? "write" : "read", j,
- chan->ll_off, chan->ll_max);
+ dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
+ write ? "write" : "read", j, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
@@ -723,12 +766,15 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->vc.desc_free = vchan_free_desc;
vchan_init(&chan->vc, dma);
- dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
- dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
- dt_region->sz = dt_chunk;
-
- dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
- write ? "write" : "read", j, chan->dt_off);
+ if (write) {
+ dt_region->paddr = dw->dt_region_wr[j].paddr;
+ dt_region->vaddr = dw->dt_region_wr[j].vaddr;
+ dt_region->sz = dw->dt_region_wr[j].sz;
+ } else {
+ dt_region->paddr = dw->dt_region_rd[j].paddr;
+ dt_region->vaddr = dw->dt_region_rd[j].vaddr;
+ dt_region->sz = dw->dt_region_rd[j].sz;
+ }
dw_edma_v0_core_device_config(chan);
}
@@ -738,6 +784,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
dma_cap_set(DMA_SLAVE, dma->cap_mask);
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -756,6 +803,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
dma->device_tx_status = dw_edma_device_tx_status;
dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+ dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;
dma_set_max_seg_size(dma->dev, U32_MAX);
@@ -863,14 +911,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
raw_spin_lock_init(&dw->lock);
- /* Find out how many write channels are supported by hardware */
- dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
- if (!dw->wr_ch_cnt)
- return -EINVAL;
+ dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+ dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
+ dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
- /* Find out how many read channels are supported by hardware */
- dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
- if (!dw->rd_ch_cnt)
+ dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+ dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
+ dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
+
+ if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
return -EINVAL;
dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
@@ -937,24 +986,23 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Power management */
pm_runtime_disable(dev);
+ /* Deregister eDMA device */
+ dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
+ list_del(&chan->vc.chan.device_node);
}
+ dma_async_device_unregister(&dw->rd_edma);
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
+ list_del(&chan->vc.chan.device_node);
}
- /* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- dma_async_device_unregister(&dw->rd_edma);
-
/* Turn debugfs off */
- dw_edma_v0_core_debugfs_off();
+ dw_edma_v0_core_debugfs_off(chip);
return 0;
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 31fc50d31792..60316d408c3e 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -15,15 +15,18 @@
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
-enum dw_edma_mode {
- EDMA_MODE_LEGACY = 0,
- EDMA_MODE_UNROLL
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5
};
enum dw_edma_request {
@@ -38,6 +41,12 @@ enum dw_edma_status {
EDMA_ST_BUSY
};
+enum dw_edma_xfer_type {
+ EDMA_XFER_SCATTER_GATHER = 0,
+ EDMA_XFER_CYCLIC,
+ EDMA_XFER_INTERLEAVED
+};
+
struct dw_edma_chan;
struct dw_edma_chunk;
@@ -82,11 +91,8 @@ struct dw_edma_chan {
int id;
enum dw_edma_dir dir;
- off_t ll_off;
u32 ll_max;
- off_t dt_off;
-
struct msi_msg msi;
enum dw_edma_request request;
@@ -117,19 +123,23 @@ struct dw_edma {
u16 rd_ch_cnt;
struct dw_edma_region rg_region; /* Registers */
- struct dw_edma_region ll_region; /* Linked list */
- struct dw_edma_region dt_region; /* Data */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
struct dw_edma_irq *irq;
int nr_irqs;
- u32 version;
- enum dw_edma_mode mode;
+ enum dw_edma_map_format mf;
struct dw_edma_chan *chan;
const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
@@ -146,12 +156,13 @@ struct dw_edma_cyclic {
struct dw_edma_transfer {
struct dma_chan *dchan;
union dw_edma_xfer {
- struct dw_edma_sg sg;
- struct dw_edma_cyclic cyclic;
+ struct dw_edma_sg sg;
+ struct dw_edma_cyclic cyclic;
+ struct dma_interleaved_template *il;
} xfer;
enum dma_transfer_direction direction;
unsigned long flags;
- bool cyclic;
+ enum dw_edma_xfer_type type;
};
static inline
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1eafc602e17e..44f6e09bdb53 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -13,45 +13,81 @@
#include <linux/dma/edma.h>
#include <linux/pci-epf.h>
#include <linux/msi.h>
+#include <linux/bitfield.h>
#include "dw-edma-core.h"
+#define DW_PCIE_VSEC_DMA_ID 0x6
+#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
+#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
+#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
+#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
+
+#define DW_BLOCK(a, b, c) \
+ { \
+ .bar = a, \
+ .off = b, \
+ .sz = c, \
+ },
+
+struct dw_edma_block {
+ enum pci_barno bar;
+ off_t off;
+ size_t sz;
+};
+
struct dw_edma_pcie_data {
/* eDMA registers location */
- enum pci_barno rg_bar;
- off_t rg_off;
- size_t rg_sz;
+ struct dw_edma_block rg;
/* eDMA memory linked list location */
- enum pci_barno ll_bar;
- off_t ll_off;
- size_t ll_sz;
+ struct dw_edma_block ll_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_block ll_rd[EDMA_MAX_RD_CH];
/* eDMA memory data location */
- enum pci_barno dt_bar;
- off_t dt_off;
- size_t dt_sz;
+ struct dw_edma_block dt_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_block dt_rd[EDMA_MAX_RD_CH];
/* Other */
- u32 version;
- enum dw_edma_mode mode;
+ enum dw_edma_map_format mf;
u8 irqs;
+ u16 wr_ch_cnt;
+ u16 rd_ch_cnt;
};
static const struct dw_edma_pcie_data snps_edda_data = {
/* eDMA registers location */
- .rg_bar = BAR_0,
- .rg_off = 0x00001000, /* 4 Kbytes */
- .rg_sz = 0x00002000, /* 8 Kbytes */
+ .rg.bar = BAR_0,
+ .rg.off = 0x00001000, /* 4 Kbytes */
+ .rg.sz = 0x00002000, /* 8 Kbytes */
/* eDMA memory linked list location */
- .ll_bar = BAR_2,
- .ll_off = 0x00000000, /* 0 Kbytes */
- .ll_sz = 0x00800000, /* 8 Mbytes */
+ .ll_wr = {
+ /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00000000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00200000, 0x00000800)
+ },
+ .ll_rd = {
+ /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00400000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00600000, 0x00000800)
+ },
/* eDMA memory data location */
- .dt_bar = BAR_2,
- .dt_off = 0x00800000, /* 8 Mbytes */
- .dt_sz = 0x03800000, /* 56 Mbytes */
+ .dt_wr = {
+ /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00800000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00900000, 0x00000800)
+ },
+ .dt_rd = {
+ /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00a00000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00b00000, 0x00000800)
+ },
/* Other */
- .version = 0,
- .mode = EDMA_MODE_UNROLL,
+ .mf = EDMA_MF_EDMA_UNROLL,
.irqs = 1,
+ .wr_ch_cnt = 2,
+ .rd_ch_cnt = 2,
};
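
DW_BLOCK() works because the macro body is a brace initializer that ends in a comma, so back-to-back invocations concatenate into one array initializer, which is how the ll_wr/ll_rd/dt_wr/dt_rd tables above are populated. A self-contained demonstration of the same trick:

#include <stdio.h>

enum pci_barno { BAR_0, BAR_1, BAR_2 };

struct block { enum pci_barno bar; long off; long sz; };

/* Same shape as the driver's helper: a brace initializer with a
 * trailing comma, so adjacent uses stack into one array. */
#define DW_BLOCK(a, b, c) { .bar = (a), .off = (b), .sz = (c) },

static const struct block ll_wr[] = {
	DW_BLOCK(BAR_2, 0x00000000, 0x00000800)
	DW_BLOCK(BAR_2, 0x00200000, 0x00000800)
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(ll_wr) / sizeof(ll_wr[0]); i++)
		printf("ch%u: bar=%d off=%#lx sz=%#lx\n",
		       i, ll_wr[i].bar, ll_wr[i].off, ll_wr[i].sz);
	return 0;
}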
static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
@@ -63,14 +99,58 @@ static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
};
+static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
+ struct dw_edma_pcie_data *pdata)
+{
+ u32 val, map;
+ u16 vsec;
+ u64 off;
+
+ vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
+ DW_PCIE_VSEC_DMA_ID);
+ if (!vsec)
+ return;
+
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
+ PCI_VNDR_HEADER_LEN(val) != 0x18)
+ return;
+
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
+ pci_read_config_dword(pdev, vsec + 0x8, &val);
+ map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
+ if (map != EDMA_MF_EDMA_LEGACY &&
+ map != EDMA_MF_EDMA_UNROLL &&
+ map != EDMA_MF_HDMA_COMPAT)
+ return;
+
+ pdata->mf = map;
+ pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);
+
+ pci_read_config_dword(pdev, vsec + 0xc, &val);
+ pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
+ FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
+ pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
+ FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));
+
+ pci_read_config_dword(pdev, vsec + 0x14, &val);
+ off = val;
+ pci_read_config_dword(pdev, vsec + 0x10, &val);
+ off <<= 32;
+ off |= val;
+ pdata->rg.off = off;
+}
+
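The VSEC parser above leans on the kernel's GENMASK()/FIELD_GET() helpers to pull packed fields out of the capability dwords. A stand-alone C sketch with portable stand-ins for those macros, decoding hypothetical register values; the field layout matches the defines above, but the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers (contiguous masks only). */
#define GENMASK(h, l)    ((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_GET(m, v)  (((v) & (m)) >> __builtin_ctz(m))

#define DW_PCIE_VSEC_DMA_BAR    GENMASK(10, 8)
#define DW_PCIE_VSEC_DMA_MAP    GENMASK(2, 0)
#define DW_PCIE_VSEC_DMA_WR_CH  GENMASK(9, 0)
#define DW_PCIE_VSEC_DMA_RD_CH  GENMASK(25, 16)

int main(void)
{
	uint32_t hdr = 0x00000205;  /* hypothetical dword: BAR 2, map 0x5 */
	uint32_t cnt = 0x00080004;  /* hypothetical: 8 read, 4 write channels */

	printf("bar=%u map=%#x\n",
	       FIELD_GET(DW_PCIE_VSEC_DMA_BAR, hdr),
	       FIELD_GET(DW_PCIE_VSEC_DMA_MAP, hdr));
	printf("wr=%u rd=%u\n",
	       FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, cnt),
	       FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, cnt));
	return 0;
}
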
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
- const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+ struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+ struct dw_edma_pcie_data vsec_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
- int err, nr_irqs;
struct dw_edma *dw;
+ int err, nr_irqs;
+ int i, mask;
/* Enable PCI device */
err = pcim_enable_device(pdev);
@@ -79,11 +159,25 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
+ memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+
+ /*
+ * Try to find a PCIe Vendor-Specific Extended Capability for the
+ * DMA; if one exists, use it to override the default configuration.
+ */
+ dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+
/* Mapping PCI BAR regions */
- err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) |
- BIT(pdata->ll_bar) |
- BIT(pdata->dt_bar),
- pci_name(pdev));
+ mask = BIT(vsec_data.rg.bar);
+ for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data.ll_wr[i].bar);
+ mask |= BIT(vsec_data.dt_wr[i].bar);
+ }
+ for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data.ll_rd[i].bar);
+ mask |= BIT(vsec_data.dt_rd[i].bar);
+ }
+ err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
pci_err(pdev, "eDMA BAR I/O remapping failed\n");
return err;
@@ -125,7 +219,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -139,46 +233,109 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
- dw->rg_region.vaddr += pdata->rg_off;
- dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
- dw->rg_region.paddr += pdata->rg_off;
- dw->rg_region.sz = pdata->rg_sz;
-
- dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
- dw->ll_region.vaddr += pdata->ll_off;
- dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
- dw->ll_region.paddr += pdata->ll_off;
- dw->ll_region.sz = pdata->ll_sz;
-
- dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
- dw->dt_region.vaddr += pdata->dt_off;
- dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
- dw->dt_region.paddr += pdata->dt_off;
- dw->dt_region.sz = pdata->dt_sz;
-
- dw->version = pdata->version;
- dw->mode = pdata->mode;
+ dw->mf = vsec_data.mf;
dw->nr_irqs = nr_irqs;
dw->ops = &dw_edma_pcie_core_ops;
+ dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
+ dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
- /* Debug info */
- pci_dbg(pdev, "Version:\t%u\n", dw->version);
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ if (!dw->rg_region.vaddr)
+ return -ENOMEM;
+
+ dw->rg_region.vaddr += vsec_data.rg.off;
+ dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
+ dw->rg_region.paddr += vsec_data.rg.off;
+ dw->rg_region.sz = vsec_data.rg.sz;
+
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
+ struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+
+ ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr)
+ return -ENOMEM;
+
+ ll_region->vaddr += ll_block->off;
+ ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->paddr += ll_block->off;
+ ll_region->sz = ll_block->sz;
+
+ dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr)
+ return -ENOMEM;
+
+ dt_region->vaddr += dt_block->off;
+ dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->paddr += dt_block->off;
+ dt_region->sz = dt_block->sz;
+ }
- pci_dbg(pdev, "Mode:\t%s\n",
- dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
+ struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+
+ ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr)
+ return -ENOMEM;
+
+ ll_region->vaddr += ll_block->off;
+ ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->paddr += ll_block->off;
+ ll_region->sz = ll_block->sz;
+
+ dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr)
+ return -ENOMEM;
+
+ dt_region->vaddr += dt_block->off;
+ dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->paddr += dt_block->off;
+ dt_region->sz = dt_block->sz;
+ }
+
+ /* Debug info */
+ if (dw->mf == EDMA_MF_EDMA_LEGACY)
+ pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
+ else if (dw->mf == EDMA_MF_EDMA_UNROLL)
+ pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
+ else if (dw->mf == EDMA_MF_HDMA_COMPAT)
+ pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+ else
+ pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
+ vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- dw->dt_region.vaddr, &dw->dt_region.paddr);
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.ll_wr[i].bar,
+ vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
+ dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+
+ pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.dt_wr[i].bar,
+ vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
+ dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+ }
+
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.ll_rd[i].bar,
+ vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
+ dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+
+ pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.dt_rd[i].bar,
+ vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
+ dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+ }
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 692de47b1670..329fc2e57b70 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -28,35 +28,75 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
return dw->rg_region.vaddr;
}
-#define SET(dw, name, value) \
+#define SET_32(dw, name, value) \
writel(value, &(__dw_regs(dw)->name))
-#define GET(dw, name) \
+#define GET_32(dw, name) \
readl(&(__dw_regs(dw)->name))
-#define SET_RW(dw, dir, name, value) \
+#define SET_RW_32(dw, dir, name, value) \
do { \
if ((dir) == EDMA_DIR_WRITE) \
- SET(dw, wr_##name, value); \
+ SET_32(dw, wr_##name, value); \
else \
- SET(dw, rd_##name, value); \
+ SET_32(dw, rd_##name, value); \
} while (0)
-#define GET_RW(dw, dir, name) \
+#define GET_RW_32(dw, dir, name) \
((dir) == EDMA_DIR_WRITE \
- ? GET(dw, wr_##name) \
- : GET(dw, rd_##name))
+ ? GET_32(dw, wr_##name) \
+ : GET_32(dw, rd_##name))
-#define SET_BOTH(dw, name, value) \
+#define SET_BOTH_32(dw, name, value) \
do { \
- SET(dw, wr_##name, value); \
- SET(dw, rd_##name, value); \
+ SET_32(dw, wr_##name, value); \
+ SET_32(dw, rd_##name, value); \
+ } while (0)
+
+#ifdef CONFIG_64BIT
+
+#define SET_64(dw, name, value) \
+ writeq(value, &(__dw_regs(dw)->name))
+
+#define GET_64(dw, name) \
+ readq(&(__dw_regs(dw)->name))
+
+#define SET_RW_64(dw, dir, name, value) \
+ do { \
+ if ((dir) == EDMA_DIR_WRITE) \
+ SET_64(dw, wr_##name, value); \
+ else \
+ SET_64(dw, rd_##name, value); \
+ } while (0)
+
+#define GET_RW_64(dw, dir, name) \
+ ((dir) == EDMA_DIR_WRITE \
+ ? GET_64(dw, wr_##name) \
+ : GET_64(dw, rd_##name))
+
+#define SET_BOTH_64(dw, name, value) \
+ do { \
+ SET_64(dw, wr_##name, value); \
+ SET_64(dw, rd_##name, value); \
+ } while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define SET_COMPAT(dw, name, value) \
+ writel(value, &(__dw_regs(dw)->type.unroll.name))
+
+#define SET_RW_COMPAT(dw, dir, name, value) \
+ do { \
+ if ((dir) == EDMA_DIR_WRITE) \
+ SET_COMPAT(dw, wr_##name, value); \
+ else \
+ SET_COMPAT(dw, rd_##name, value); \
} while (0)
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
- if (dw->mode == EDMA_MODE_LEGACY)
+ if (dw->mf == EDMA_MF_EDMA_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
@@ -68,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
- if (dw->mode == EDMA_MODE_LEGACY) {
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -93,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mode == EDMA_MODE_LEGACY) {
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -115,21 +155,86 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
return value;
}
-#define SET_CH(dw, dir, ch, name, value) \
+#define SET_CH_32(dw, dir, ch, name, value) \
writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
-#define GET_CH(dw, dir, ch, name) \
+#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL(ll, value) \
+#define SET_LL_32(ll, value) \
writel(value, ll)
+#ifdef CONFIG_64BIT
+
+static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ u64 value, void __iomem *addr)
+{
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ u32 viewport_sel;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ if (dir == EDMA_DIR_READ)
+ viewport_sel |= BIT(31);
+
+ writel(viewport_sel,
+ &(__dw_regs(dw)->type.legacy.viewport_sel));
+ writeq(value, addr);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+ } else {
+ writeq(value, addr);
+ }
+}
+
+static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ const void __iomem *addr)
+{
+ u64 value;
+
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ u32 viewport_sel;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ if (dir == EDMA_DIR_READ)
+ viewport_sel |= BIT(31);
+
+ writel(viewport_sel,
+ &(__dw_regs(dw)->type.legacy.viewport_sel));
+ value = readq(addr);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+ } else {
+ value = readq(addr);
+ }
+
+ return value;
+}
+
+#define SET_CH_64(dw, dir, ch, name, value) \
+ writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define GET_CH_64(dw, dir, ch, name) \
+ readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define SET_LL_64(ll, value) \
+ writeq(value, ll)
+
+#endif /* CONFIG_64BIT */
+
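The 64-bit accessors above mirror the 32-bit ones: in EDMA_MF_EDMA_LEGACY mode every channel-register access must first steer a shared viewport at the target channel, under the legacy spinlock. A user-space model of that select-then-access pattern, with a pthread mutex and an in-memory register file standing in for the hardware (purely illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t viewport_sel;         /* which channel the window shows */
static uint64_t ch_regs[8][4];        /* per-channel backing store */

static uint64_t readq_ch(int is_read_dir, uint16_t ch, int reg)
{
	uint64_t val;

	pthread_mutex_lock(&lock);
	/* In hardware this is a register write that re-aims the window;
	 * bit 31 selects the read direction, as in the driver. */
	viewport_sel = ch | (is_read_dir ? 1u << 31 : 0);
	val = ch_regs[ch][reg];       /* access through the window */
	pthread_mutex_unlock(&lock);
	return val;
}

int main(void)
{
	ch_regs[3][0] = 0xdeadbeefcafef00dull;
	printf("%#llx\n", (unsigned long long)readq_ch(1, 3, 0));
	return 0;
}
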
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
- SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
- SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
- SET_BOTH(dw, engine_en, 0);
+ SET_BOTH_32(dw, int_mask,
+ EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_32(dw, int_clear,
+ EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_32(dw, engine_en, 0);
}
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
@@ -137,9 +242,11 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
u32 num_ch;
if (dir == EDMA_DIR_WRITE)
- num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
+ num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
+ GET_32(dw, ctrl));
else
- num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));
+ num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
+ GET_32(dw, ctrl));
if (num_ch > EDMA_V0_MAX_NR_CH)
num_ch = EDMA_V0_MAX_NR_CH;
@@ -153,7 +260,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
- GET_CH(dw, chan->dir, chan->id, ch_control1));
+ GET_CH_32(dw, chan->dir, chan->id, ch_control1));
if (tmp == 1)
return DMA_IN_PROGRESS;
@@ -167,26 +274,28 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
- SET_RW(dw, chan->dir, int_clear,
- FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
+ SET_RW_32(dw, chan->dir, int_clear,
+ FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
- SET_RW(dw, chan->dir, int_clear,
- FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
+ SET_RW_32(dw, chan->dir, int_clear,
+ FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
- return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
+ return FIELD_GET(EDMA_V0_DONE_INT_MASK,
+ GET_RW_32(dw, dir, int_status));
}
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
- return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
+ return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
+ GET_RW_32(dw, dir, int_status));
}
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
@@ -209,15 +318,23 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
/* Channel control */
- SET_LL(&lli[i].control, control);
+ SET_LL_32(&lli[i].control, control);
/* Transfer size */
- SET_LL(&lli[i].transfer_size, child->sz);
- /* SAR - low, high */
- SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
- /* DAR - low, high */
- SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
+ SET_LL_32(&lli[i].transfer_size, child->sz);
+ /* SAR */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&lli[i].sar.reg, child->sar);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
+ SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
+ #endif /* CONFIG_64BIT */
+ /* DAR */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&lli[i].dar.reg, child->dar);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
+ SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
+ #endif /* CONFIG_64BIT */
i++;
}
@@ -227,10 +344,14 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
control |= DW_EDMA_V0_CB;
/* Channel control */
- SET_LL(&llp->control, control);
- /* Linked list - low, high */
- SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
- SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
+ SET_LL_32(&llp->control, control);
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
+ #endif /* CONFIG_64BIT */
}
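
write_chunk() above emits one linked-list element per burst and finishes the chunk with a link element carrying the cycle bit. A compact model of that element layout in plain C, with the 64-bit addresses split into lsb/msb halves as on a 32-bit build; the control-bit values are made up for the sketch, not the real DW_EDMA_V0_* encodings.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of one v0 linked-list element: control word, size, then the
 * 64-bit source/destination addresses split into two 32-bit halves. */
struct lli {
	uint32_t control, transfer_size;
	uint32_t sar_lsb, sar_msb, dar_lsb, dar_msb;
};

#define LL_CB   (1u << 0)   /* cycle bit (illustrative value) */
#define LL_LLP  (1u << 2)   /* "this element is a link" (illustrative) */

static void fill_burst(struct lli *e, int cb, uint32_t sz,
		       uint64_t sar, uint64_t dar)
{
	e->control = cb ? LL_CB : 0;
	e->transfer_size = sz;
	e->sar_lsb = (uint32_t)sar;  e->sar_msb = (uint32_t)(sar >> 32);
	e->dar_lsb = (uint32_t)dar;  e->dar_msb = (uint32_t)(dar >> 32);
}

int main(void)
{
	struct lli ll[3];

	fill_burst(&ll[0], 1, 4096, 0x100000000ull, 0x2000);
	fill_burst(&ll[1], 1, 4096, 0x100001000ull, 0x3000);
	/* Last element links back to the head of the chunk's list. */
	memset(&ll[2], 0, sizeof(ll[2]));
	ll[2].control = LL_LLP | LL_CB;
	printf("burst0 sar msb/lsb = %#x/%#x\n", ll[0].sar_msb, ll[0].sar_lsb);
	return 0;
}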
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -243,28 +364,69 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
- SET_RW(dw, chan->dir, engine_en, BIT(0));
+ SET_RW_32(dw, chan->dir, engine_en, BIT(0));
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ switch (chan->id) {
+ case 0:
+ SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
+ BIT(0));
+ break;
+ case 1:
+ SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
+ BIT(0));
+ break;
+ case 2:
+ SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
+ BIT(0));
+ break;
+ case 3:
+ SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
+ BIT(0));
+ break;
+ case 4:
+ SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
+ BIT(0));
+ break;
+ case 5:
+ SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
+ BIT(0));
+ break;
+ case 6:
+ SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
+ BIT(0));
+ break;
+ case 7:
+ SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
+ BIT(0));
+ break;
+ }
+ }
/* Interrupt unmask - done, abort */
- tmp = GET_RW(dw, chan->dir, int_mask);
+ tmp = GET_RW_32(dw, chan->dir, int_mask);
tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
- SET_RW(dw, chan->dir, int_mask, tmp);
+ SET_RW_32(dw, chan->dir, int_mask, tmp);
/* Linked list error */
- tmp = GET_RW(dw, chan->dir, linked_list_err_en);
+ tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
- SET_RW(dw, chan->dir, linked_list_err_en, tmp);
+ SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
/* Channel control */
- SET_CH(dw, chan->dir, chan->id, ch_control1,
- (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
- /* Linked list - low, high */
- SET_CH(dw, chan->dir, chan->id, llp_low,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH(dw, chan->dir, chan->id, llp_high,
- upper_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, ch_control1,
+ (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+ SET_CH_64(dw, chan->dir, chan->id, llp.reg,
+ chunk->ll_region.paddr);
+ #else /* CONFIG_64BIT */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
+ #endif /* CONFIG_64BIT */
}
/* Doorbell */
- SET_RW(dw, chan->dir, doorbell,
- FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
+ SET_RW_32(dw, chan->dir, doorbell,
+ FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
@@ -273,31 +435,31 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
u32 tmp = 0;
/* MSI done addr - low, high */
- SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
- SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
+ SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
+ SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
/* MSI abort addr - low, high */
- SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
- SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
+ SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
+ SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
/* MSI data - low, high */
switch (chan->id) {
case 0:
case 1:
- tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
break;
case 2:
case 3:
- tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
break;
case 4:
case 5:
- tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
break;
case 6:
case 7:
- tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
break;
}
@@ -316,22 +478,22 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
switch (chan->id) {
case 0:
case 1:
- SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
break;
case 2:
case 3:
- SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
break;
case 4:
case 5:
- SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
break;
case 6:
case 7:
- SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
break;
}
@@ -344,7 +506,7 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
dw_edma_v0_debugfs_on(chip);
}
-void dw_edma_v0_core_debugfs_off(void)
+void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
{
- dw_edma_v0_debugfs_off();
+ dw_edma_v0_debugfs_off(chip);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index abae1527f1f9..2afa626b8300 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -23,6 +23,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(void);
+void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 6f62711a4c94..4b3bcffd15ef 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -38,7 +38,6 @@
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dentry *base_dir;
static struct dw_edma *dw;
static struct dw_edma_v0_regs __iomem *regs;
@@ -55,7 +54,7 @@ struct debugfs_entries {
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
- if (dw->mode == EDMA_MODE_LEGACY &&
+ if (dw->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
@@ -114,12 +113,12 @@ static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
REGISTER(ch_control1),
REGISTER(ch_control2),
REGISTER(transfer_size),
- REGISTER(sar_low),
- REGISTER(sar_high),
- REGISTER(dar_low),
- REGISTER(dar_high),
- REGISTER(llp_low),
- REGISTER(llp_high),
+ REGISTER(sar.lsb),
+ REGISTER(sar.msb),
+ REGISTER(dar.lsb),
+ REGISTER(dar.msb),
+ REGISTER(llp.lsb),
+ REGISTER(llp.msb),
};
nr_entries = ARRAY_SIZE(debugfs_regs);
@@ -132,17 +131,17 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
/* eDMA global registers */
WR_REGISTER(engine_en),
WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight_low),
- WR_REGISTER(ch_arb_weight_high),
+ WR_REGISTER(ch_arb_weight.lsb),
+ WR_REGISTER(ch_arb_weight.msb),
/* eDMA interrupts registers */
WR_REGISTER(int_status),
WR_REGISTER(int_mask),
WR_REGISTER(int_clear),
WR_REGISTER(err_status),
- WR_REGISTER(done_imwr_low),
- WR_REGISTER(done_imwr_high),
- WR_REGISTER(abort_imwr_low),
- WR_REGISTER(abort_imwr_high),
+ WR_REGISTER(done_imwr.lsb),
+ WR_REGISTER(done_imwr.msb),
+ WR_REGISTER(abort_imwr.lsb),
+ WR_REGISTER(abort_imwr.msb),
WR_REGISTER(ch01_imwr_data),
WR_REGISTER(ch23_imwr_data),
WR_REGISTER(ch45_imwr_data),
@@ -152,8 +151,8 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt_low),
- WR_REGISTER_UNROLL(engine_hshake_cnt_high),
+ WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
WR_REGISTER_UNROLL(ch0_pwr_en),
WR_REGISTER_UNROLL(ch1_pwr_en),
WR_REGISTER_UNROLL(ch2_pwr_en),
@@ -174,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mode == EDMA_MODE_UNROLL) {
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -200,19 +199,19 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
/* eDMA global registers */
RD_REGISTER(engine_en),
RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight_low),
- RD_REGISTER(ch_arb_weight_high),
+ RD_REGISTER(ch_arb_weight.lsb),
+ RD_REGISTER(ch_arb_weight.msb),
/* eDMA interrupts registers */
RD_REGISTER(int_status),
RD_REGISTER(int_mask),
RD_REGISTER(int_clear),
- RD_REGISTER(err_status_low),
- RD_REGISTER(err_status_high),
+ RD_REGISTER(err_status.lsb),
+ RD_REGISTER(err_status.msb),
RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr_low),
- RD_REGISTER(done_imwr_high),
- RD_REGISTER(abort_imwr_low),
- RD_REGISTER(abort_imwr_high),
+ RD_REGISTER(done_imwr.lsb),
+ RD_REGISTER(done_imwr.msb),
+ RD_REGISTER(abort_imwr.lsb),
+ RD_REGISTER(abort_imwr.msb),
RD_REGISTER(ch01_imwr_data),
RD_REGISTER(ch23_imwr_data),
RD_REGISTER(ch45_imwr_data),
@@ -221,8 +220,8 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt_low),
- RD_REGISTER_UNROLL(engine_hshake_cnt_high),
+ RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
RD_REGISTER_UNROLL(ch0_pwr_en),
RD_REGISTER_UNROLL(ch1_pwr_en),
RD_REGISTER_UNROLL(ch2_pwr_en),
@@ -243,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mode == EDMA_MODE_UNROLL) {
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -272,7 +271,7 @@ static void dw_edma_debugfs_regs(void)
struct dentry *regs_dir;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
+ regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
if (!regs_dir)
return;
@@ -293,19 +292,23 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!regs)
return;
- base_dir = debugfs_create_dir(dw->name, NULL);
- if (!base_dir)
+ dw->debugfs = debugfs_create_dir(dw->name, NULL);
+ if (!dw->debugfs)
return;
- debugfs_create_u32("version", 0444, base_dir, &dw->version);
- debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
- debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);
+ debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
-void dw_edma_v0_debugfs_off(void)
+void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
{
- debugfs_remove_recursive(base_dir);
+ dw = chip->dw;
+ if (!dw)
+ return;
+
+ debugfs_remove_recursive(dw->debugfs);
+ dw->debugfs = NULL;
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index 5450a0a94193..d0ff25a9ea5c 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -13,13 +13,13 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(void);
+void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
}
-static inline void dw_edma_v0_debugfs_off(void)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h
index dfd70e223c2f..e175f7b20480 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h
@@ -25,134 +25,209 @@
#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0)
struct dw_edma_v0_ch_regs {
- u32 ch_control1; /* 0x000 */
- u32 ch_control2; /* 0x004 */
- u32 transfer_size; /* 0x008 */
- u32 sar_low; /* 0x00c */
- u32 sar_high; /* 0x010 */
- u32 dar_low; /* 0x014 */
- u32 dar_high; /* 0x018 */
- u32 llp_low; /* 0x01c */
- u32 llp_high; /* 0x020 */
-};
+ u32 ch_control1; /* 0x0000 */
+ u32 ch_control2; /* 0x0004 */
+ u32 transfer_size; /* 0x0008 */
+ union {
+ u64 reg; /* 0x000c..0x0010 */
+ struct {
+ u32 lsb; /* 0x000c */
+ u32 msb; /* 0x0010 */
+ };
+ } sar;
+ union {
+ u64 reg; /* 0x0014..0x0018 */
+ struct {
+ u32 lsb; /* 0x0014 */
+ u32 msb; /* 0x0018 */
+ };
+ } dar;
+ union {
+ u64 reg; /* 0x001c..0x0020 */
+ struct {
+ u32 lsb; /* 0x001c */
+ u32 msb; /* 0x0020 */
+ };
+ } llp;
+} __packed;
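
The reg-versus-lsb/msb union introduced above is what lets the core pick a single writeq() or a writel() pair for the same field without casts. The same shape in standard C, with the layout pinned down by static asserts; it assumes a little-endian host, which is what the IP expects.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

union reg64 {
	uint64_t reg;
	struct {
		uint32_t lsb;   /* low half on a little-endian host */
		uint32_t msb;
	};
};

_Static_assert(sizeof(union reg64) == 8, "no padding allowed");
_Static_assert(offsetof(union reg64, msb) == 4, "msb is the high half");

int main(void)
{
	union reg64 r = { .reg = 0x0000000100000002ull };

	printf("lsb=%#x msb=%#x\n", r.lsb, r.msb);  /* lsb=0x2 msb=0x1 */
	return 0;
}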
struct dw_edma_v0_ch {
- struct dw_edma_v0_ch_regs wr; /* 0x200 */
- u32 padding_1[55]; /* [0x224..0x2fc] */
- struct dw_edma_v0_ch_regs rd; /* 0x300 */
- u32 padding_2[55]; /* [0x324..0x3fc] */
-};
+ struct dw_edma_v0_ch_regs wr; /* 0x0200 */
+ u32 padding_1[55]; /* 0x0224..0x02fc */
+ struct dw_edma_v0_ch_regs rd; /* 0x0300 */
+ u32 padding_2[55]; /* 0x0324..0x03fc */
+} __packed;
struct dw_edma_v0_unroll {
- u32 padding_1; /* 0x0f8 */
- u32 wr_engine_chgroup; /* 0x100 */
- u32 rd_engine_chgroup; /* 0x104 */
- u32 wr_engine_hshake_cnt_low; /* 0x108 */
- u32 wr_engine_hshake_cnt_high; /* 0x10c */
- u32 padding_2[2]; /* [0x110..0x114] */
- u32 rd_engine_hshake_cnt_low; /* 0x118 */
- u32 rd_engine_hshake_cnt_high; /* 0x11c */
- u32 padding_3[2]; /* [0x120..0x124] */
- u32 wr_ch0_pwr_en; /* 0x128 */
- u32 wr_ch1_pwr_en; /* 0x12c */
- u32 wr_ch2_pwr_en; /* 0x130 */
- u32 wr_ch3_pwr_en; /* 0x134 */
- u32 wr_ch4_pwr_en; /* 0x138 */
- u32 wr_ch5_pwr_en; /* 0x13c */
- u32 wr_ch6_pwr_en; /* 0x140 */
- u32 wr_ch7_pwr_en; /* 0x144 */
- u32 padding_4[8]; /* [0x148..0x164] */
- u32 rd_ch0_pwr_en; /* 0x168 */
- u32 rd_ch1_pwr_en; /* 0x16c */
- u32 rd_ch2_pwr_en; /* 0x170 */
- u32 rd_ch3_pwr_en; /* 0x174 */
- u32 rd_ch4_pwr_en; /* 0x178 */
- u32 rd_ch5_pwr_en; /* 0x18c */
- u32 rd_ch6_pwr_en; /* 0x180 */
- u32 rd_ch7_pwr_en; /* 0x184 */
- u32 padding_5[30]; /* [0x188..0x1fc] */
- struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */
-};
+ u32 padding_1; /* 0x00f8 */
+ u32 wr_engine_chgroup; /* 0x0100 */
+ u32 rd_engine_chgroup; /* 0x0104 */
+ union {
+ u64 reg; /* 0x0108..0x010c */
+ struct {
+ u32 lsb; /* 0x0108 */
+ u32 msb; /* 0x010c */
+ };
+ } wr_engine_hshake_cnt;
+ u32 padding_2[2]; /* 0x0110..0x0114 */
+ union {
+ u64 reg; /* 0x0118..0x011c */
+ struct {
+ u32 lsb; /* 0x0118 */
+ u32 msb; /* 0x011c */
+ };
+ } rd_engine_hshake_cnt;
+ u32 padding_3[2]; /* 0x0120..0x0124 */
+ u32 wr_ch0_pwr_en; /* 0x0128 */
+ u32 wr_ch1_pwr_en; /* 0x012c */
+ u32 wr_ch2_pwr_en; /* 0x0130 */
+ u32 wr_ch3_pwr_en; /* 0x0134 */
+ u32 wr_ch4_pwr_en; /* 0x0138 */
+ u32 wr_ch5_pwr_en; /* 0x013c */
+ u32 wr_ch6_pwr_en; /* 0x0140 */
+ u32 wr_ch7_pwr_en; /* 0x0144 */
+ u32 padding_4[8]; /* 0x0148..0x0164 */
+ u32 rd_ch0_pwr_en; /* 0x0168 */
+ u32 rd_ch1_pwr_en; /* 0x016c */
+ u32 rd_ch2_pwr_en; /* 0x0170 */
+ u32 rd_ch3_pwr_en; /* 0x0174 */
+ u32 rd_ch4_pwr_en; /* 0x0178 */
+ u32 rd_ch5_pwr_en; /* 0x017c */
+ u32 rd_ch6_pwr_en; /* 0x0180 */
+ u32 rd_ch7_pwr_en; /* 0x0184 */
+ u32 padding_5[30]; /* 0x0188..0x01fc */
+ struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* 0x0200..0x1120 */
+} __packed;
struct dw_edma_v0_legacy {
- u32 viewport_sel; /* 0x0f8 */
- struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */
-};
+ u32 viewport_sel; /* 0x00f8 */
+ struct dw_edma_v0_ch_regs ch; /* 0x0100..0x0120 */
+} __packed;
struct dw_edma_v0_regs {
/* eDMA global registers */
- u32 ctrl_data_arb_prior; /* 0x000 */
- u32 padding_1; /* 0x004 */
- u32 ctrl; /* 0x008 */
- u32 wr_engine_en; /* 0x00c */
- u32 wr_doorbell; /* 0x010 */
- u32 padding_2; /* 0x014 */
- u32 wr_ch_arb_weight_low; /* 0x018 */
- u32 wr_ch_arb_weight_high; /* 0x01c */
- u32 padding_3[3]; /* [0x020..0x028] */
- u32 rd_engine_en; /* 0x02c */
- u32 rd_doorbell; /* 0x030 */
- u32 padding_4; /* 0x034 */
- u32 rd_ch_arb_weight_low; /* 0x038 */
- u32 rd_ch_arb_weight_high; /* 0x03c */
- u32 padding_5[3]; /* [0x040..0x048] */
+ u32 ctrl_data_arb_prior; /* 0x0000 */
+ u32 padding_1; /* 0x0004 */
+ u32 ctrl; /* 0x0008 */
+ u32 wr_engine_en; /* 0x000c */
+ u32 wr_doorbell; /* 0x0010 */
+ u32 padding_2; /* 0x0014 */
+ union {
+ u64 reg; /* 0x0018..0x001c */
+ struct {
+ u32 lsb; /* 0x0018 */
+ u32 msb; /* 0x001c */
+ };
+ } wr_ch_arb_weight;
+ u32 padding_3[3]; /* 0x0020..0x0028 */
+ u32 rd_engine_en; /* 0x002c */
+ u32 rd_doorbell; /* 0x0030 */
+ u32 padding_4; /* 0x0034 */
+ union {
+ u64 reg; /* 0x0038..0x003c */
+ struct {
+ u32 lsb; /* 0x0038 */
+ u32 msb; /* 0x003c */
+ };
+ } rd_ch_arb_weight;
+ u32 padding_5[3]; /* 0x0040..0x0048 */
/* eDMA interrupts registers */
- u32 wr_int_status; /* 0x04c */
- u32 padding_6; /* 0x050 */
- u32 wr_int_mask; /* 0x054 */
- u32 wr_int_clear; /* 0x058 */
- u32 wr_err_status; /* 0x05c */
- u32 wr_done_imwr_low; /* 0x060 */
- u32 wr_done_imwr_high; /* 0x064 */
- u32 wr_abort_imwr_low; /* 0x068 */
- u32 wr_abort_imwr_high; /* 0x06c */
- u32 wr_ch01_imwr_data; /* 0x070 */
- u32 wr_ch23_imwr_data; /* 0x074 */
- u32 wr_ch45_imwr_data; /* 0x078 */
- u32 wr_ch67_imwr_data; /* 0x07c */
- u32 padding_7[4]; /* [0x080..0x08c] */
- u32 wr_linked_list_err_en; /* 0x090 */
- u32 padding_8[3]; /* [0x094..0x09c] */
- u32 rd_int_status; /* 0x0a0 */
- u32 padding_9; /* 0x0a4 */
- u32 rd_int_mask; /* 0x0a8 */
- u32 rd_int_clear; /* 0x0ac */
- u32 padding_10; /* 0x0b0 */
- u32 rd_err_status_low; /* 0x0b4 */
- u32 rd_err_status_high; /* 0x0b8 */
- u32 padding_11[2]; /* [0x0bc..0x0c0] */
- u32 rd_linked_list_err_en; /* 0x0c4 */
- u32 padding_12; /* 0x0c8 */
- u32 rd_done_imwr_low; /* 0x0cc */
- u32 rd_done_imwr_high; /* 0x0d0 */
- u32 rd_abort_imwr_low; /* 0x0d4 */
- u32 rd_abort_imwr_high; /* 0x0d8 */
- u32 rd_ch01_imwr_data; /* 0x0dc */
- u32 rd_ch23_imwr_data; /* 0x0e0 */
- u32 rd_ch45_imwr_data; /* 0x0e4 */
- u32 rd_ch67_imwr_data; /* 0x0e8 */
- u32 padding_13[4]; /* [0x0ec..0x0f8] */
+ u32 wr_int_status; /* 0x004c */
+ u32 padding_6; /* 0x0050 */
+ u32 wr_int_mask; /* 0x0054 */
+ u32 wr_int_clear; /* 0x0058 */
+ u32 wr_err_status; /* 0x005c */
+ union {
+ u64 reg; /* 0x0060..0x0064 */
+ struct {
+ u32 lsb; /* 0x0060 */
+ u32 msb; /* 0x0064 */
+ };
+ } wr_done_imwr;
+ union {
+ u64 reg; /* 0x0068..0x006c */
+ struct {
+ u32 lsb; /* 0x0068 */
+ u32 msb; /* 0x006c */
+ };
+ } wr_abort_imwr;
+ u32 wr_ch01_imwr_data; /* 0x0070 */
+ u32 wr_ch23_imwr_data; /* 0x0074 */
+ u32 wr_ch45_imwr_data; /* 0x0078 */
+ u32 wr_ch67_imwr_data; /* 0x007c */
+ u32 padding_7[4]; /* 0x0080..0x008c */
+ u32 wr_linked_list_err_en; /* 0x0090 */
+ u32 padding_8[3]; /* 0x0094..0x009c */
+ u32 rd_int_status; /* 0x00a0 */
+ u32 padding_9; /* 0x00a4 */
+ u32 rd_int_mask; /* 0x00a8 */
+ u32 rd_int_clear; /* 0x00ac */
+ u32 padding_10; /* 0x00b0 */
+ union {
+ u64 reg; /* 0x00b4..0x00b8 */
+ struct {
+ u32 lsb; /* 0x00b4 */
+ u32 msb; /* 0x00b8 */
+ };
+ } rd_err_status;
+ u32 padding_11[2]; /* 0x00bc..0x00c0 */
+ u32 rd_linked_list_err_en; /* 0x00c4 */
+ u32 padding_12; /* 0x00c8 */
+ union {
+ u64 reg; /* 0x00cc..0x00d0 */
+ struct {
+ u32 lsb; /* 0x00cc */
+ u32 msb; /* 0x00d0 */
+ };
+ } rd_done_imwr;
+ union {
+ u64 reg; /* 0x00d4..0x00d8 */
+ struct {
+ u32 lsb; /* 0x00d4 */
+ u32 msb; /* 0x00d8 */
+ };
+ } rd_abort_imwr;
+ u32 rd_ch01_imwr_data; /* 0x00dc */
+ u32 rd_ch23_imwr_data; /* 0x00e0 */
+ u32 rd_ch45_imwr_data; /* 0x00e4 */
+ u32 rd_ch67_imwr_data; /* 0x00e8 */
+ u32 padding_13[4]; /* 0x00ec..0x00f8 */
/* eDMA channel context grouping */
union dw_edma_v0_type {
- struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */
- struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */
+ struct dw_edma_v0_legacy legacy; /* 0x00f8..0x0120 */
+ struct dw_edma_v0_unroll unroll; /* 0x00f8..0x1120 */
} type;
-};
+} __packed;
struct dw_edma_v0_lli {
u32 control;
u32 transfer_size;
- u32 sar_low;
- u32 sar_high;
- u32 dar_low;
- u32 dar_high;
-};
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } sar;
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } dar;
+} __packed;
struct dw_edma_v0_llp {
u32 control;
u32 reserved;
- u32 llp_low;
- u32 llp_high;
-};
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } llp;
+} __packed;
#endif /* _DW_EDMA_V0_REGS_H */
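
The recurring union { u64 reg; struct { u32 lsb; u32 msb; }; } shape introduced throughout this header lets accessors touch a 64-bit register either whole or as two 32-bit halves at the documented offsets, while __packed pins the layout. A rough sketch of the intended access pattern; the real accessors live in dw-edma-v0-core.c, these helper names are illustrative only, and little-endian MMIO is assumed:

#include <linux/io.h>
#include <linux/kernel.h>

static void sketch_set_sar(struct dw_edma_v0_ch_regs __iomem *ch, u64 addr)
{
	/* Two 32-bit writes keep 32-bit-only host buses working. */
	writel(lower_32_bits(addr), &ch->sar.lsb);
	writel(upper_32_bits(addr), &ch->sar.msb);
}

static u64 sketch_get_sar(struct dw_edma_v0_ch_regs __iomem *ch)
{
	return ((u64)readl(&ch->sar.msb) << 32) | readl(&ch->sar.lsb);
}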
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 8978b898d777..6d11558756f8 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
+
+idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
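
For readers less familiar with kbuild composite objects: the idxd-y list names the objects always linked into idxd.ko, while the idxd-$(CONFIG_...) form appends an object only when that bool option is y. The generic shape, with hypothetical CONFIG names:

obj-$(CONFIG_FOO)       += foo.o     # build foo.ko (or built-in) at all
foo-y                   := core.o io.o
foo-$(CONFIG_FOO_STATS) += stats.o   # folded into foo.o only when FOO_STATS=y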
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 0db9b82ed8cf..302cba5ff779 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -39,15 +39,15 @@ struct idxd_user_context {
struct iommu_sva *sva;
};
-enum idxd_cdev_cleanup {
- CDEV_NORMAL = 0,
- CDEV_FAILED,
-};
-
static void idxd_cdev_dev_release(struct device *dev)
{
- dev_dbg(dev, "releasing cdev device\n");
- kfree(dev);
+ struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
+ struct idxd_cdev_context *cdev_ctx;
+ struct idxd_wq *wq = idxd_cdev->wq;
+
+ cdev_ctx = &ictx[wq->idxd->data->type];
+ ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ kfree(idxd_cdev);
}
static struct device_type idxd_cdev_device_type = {
@@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
return container_of(cdev, struct idxd_cdev, cdev);
}
-static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
-{
- return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
-}
-
static inline struct idxd_wq *inode_wq(struct inode *inode)
{
- return idxd_cdev_wq(inode_idxd_cdev(inode));
+ struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
+
+ return idxd_cdev->wq;
}
static int idxd_cdev_open(struct inode *inode, struct file *filp)
@@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
unsigned long flags;
__poll_t out = 0;
- poll_wait(filp, &idxd_cdev->err_queue, wait);
+ poll_wait(filp, &wq->err_queue, wait);
spin_lock_irqsave(&idxd->dev_lock, flags);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
@@ -243,101 +239,69 @@ static const struct file_operations idxd_cdev_fops = {
int idxd_cdev_get_major(struct idxd_device *idxd)
{
- return MAJOR(ictx[idxd->type].devt);
+ return MAJOR(ictx[idxd->data->type].devt);
}
-static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+int idxd_wq_add_cdev(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
- struct idxd_cdev_context *cdev_ctx;
+ struct idxd_cdev *idxd_cdev;
+ struct cdev *cdev;
struct device *dev;
- int minor, rc;
+ struct idxd_cdev_context *cdev_ctx;
+ int rc, minor;
- idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
- if (!idxd_cdev->dev)
+ idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
+ if (!idxd_cdev)
return -ENOMEM;
- dev = idxd_cdev->dev;
- dev->parent = &idxd->pdev->dev;
- dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
- idxd->id, wq->id);
- dev->bus = idxd_get_bus_type(idxd);
-
- cdev_ctx = &ictx[wq->idxd->type];
+ idxd_cdev->wq = wq;
+ cdev = &idxd_cdev->cdev;
+ dev = &idxd_cdev->dev;
+ cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
- rc = minor;
- kfree(dev);
- goto ida_err;
- }
-
- dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
- dev->type = &idxd_cdev_device_type;
- rc = device_register(dev);
- if (rc < 0) {
- dev_err(&idxd->pdev->dev, "device register failed\n");
- goto dev_reg_err;
+ kfree(idxd_cdev);
+ return minor;
}
idxd_cdev->minor = minor;
- return 0;
-
- dev_reg_err:
- ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
- put_device(dev);
- ida_err:
- idxd_cdev->dev = NULL;
- return rc;
-}
-
-static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
- enum idxd_cdev_cleanup cdev_state)
-{
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
- struct idxd_cdev_context *cdev_ctx;
-
- cdev_ctx = &ictx[wq->idxd->type];
- if (cdev_state == CDEV_NORMAL)
- cdev_del(&idxd_cdev->cdev);
- device_unregister(idxd_cdev->dev);
- /*
- * The device_type->release() will be called on the device and free
- * the allocated struct device. We can just forget it.
- */
- ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
- idxd_cdev->dev = NULL;
- idxd_cdev->minor = -1;
-}
-
-int idxd_wq_add_cdev(struct idxd_wq *wq)
-{
- struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
- struct cdev *cdev = &idxd_cdev->cdev;
- struct device *dev;
- int rc;
+ device_initialize(dev);
+ dev->parent = &wq->conf_dev;
+ dev->bus = &dsa_bus_type;
+ dev->type = &idxd_cdev_device_type;
+ dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
- rc = idxd_wq_cdev_dev_setup(wq);
+ rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
if (rc < 0)
- return rc;
+ goto err;
- dev = idxd_cdev->dev;
+ wq->idxd_cdev = idxd_cdev;
cdev_init(cdev, &idxd_cdev_fops);
- cdev_set_parent(cdev, &dev->kobj);
- rc = cdev_add(cdev, dev->devt, 1);
+ rc = cdev_device_add(cdev, dev);
if (rc) {
dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
- idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
- return rc;
+ goto err;
}
- init_waitqueue_head(&idxd_cdev->err_queue);
return 0;
+
+ err:
+ put_device(dev);
+ wq->idxd_cdev = NULL;
+ return rc;
}
void idxd_wq_del_cdev(struct idxd_wq *wq)
{
- idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+ struct idxd_cdev *idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+
+ cdev_ctx = &ictx[wq->idxd->data->type];
+ idxd_cdev = wq->idxd_cdev;
+ wq->idxd_cdev = NULL;
+ cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
+ put_device(&idxd_cdev->dev);
}
int idxd_cdev_register(void)
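
The rework above is the standard embedded-struct-device lifetime pattern: device_initialize() plus cdev_device_add() replace the old cdev_add()/device_register() pair, and the type->release() callback (idxd_cdev_dev_release) becomes the single place where the allocation and the minor number die. A stripped-down sketch of that shape, with hypothetical foo names:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/slab.h>

struct foo_cdev {
	struct device dev;	/* embedded: its refcount owns the struct */
	struct cdev cdev;
};

static void foo_cdev_release(struct device *dev)
{
	struct foo_cdev *fc = container_of(dev, struct foo_cdev, dev);

	kfree(fc);		/* final put_device() lands here */
}

static struct device_type foo_cdev_type = {
	.name = "foo_cdev",
	.release = foo_cdev_release,
};

static int foo_add_cdev(dev_t devt, const struct file_operations *fops)
{
	struct foo_cdev *fc;
	int rc;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	device_initialize(&fc->dev);
	fc->dev.devt = devt;
	fc->dev.type = &foo_cdev_type;
	rc = dev_set_name(&fc->dev, "foo%d", MINOR(devt));
	if (rc)
		goto err;

	cdev_init(&fc->cdev, fops);
	rc = cdev_device_add(&fc->cdev, &fc->dev);
	if (rc)
		goto err;
	return 0;

 err:
	put_device(&fc->dev);	/* release() frees fc, even before add */
	return rc;
}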
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 31c819544a22..420b93fe5feb 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
- struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
+ struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
pci_msi_mask_irq(data);
}
@@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
- struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
+ struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
pci_msi_unmask_irq(data);
}
@@ -47,6 +47,7 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd)
genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
genctrl.softerr_int_en = 1;
+ genctrl.halt_int_en = 1;
iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
@@ -56,6 +57,7 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd)
genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
genctrl.softerr_int_en = 0;
+ genctrl.halt_int_en = 0;
iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
@@ -144,14 +146,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (rc < 0)
return rc;
- if (idxd->type == IDXD_TYPE_DSA)
- align = 32;
- else if (idxd->type == IDXD_TYPE_IAX)
- align = 64;
- else
- return -ENODEV;
-
- wq->compls_size = num_descs * idxd->compl_size + align;
+ align = idxd->data->align;
+ wq->compls_size = num_descs * idxd->data->compl_size + align;
wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
&wq->compls_addr_raw, GFP_KERNEL);
if (!wq->compls_raw) {
@@ -178,16 +174,14 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
struct idxd_desc *desc = wq->descs[i];
desc->hw = wq->hw_descs[i];
- if (idxd->type == IDXD_TYPE_DSA)
+ if (idxd->data->type == IDXD_TYPE_DSA)
desc->completion = &wq->compls[i];
- else if (idxd->type == IDXD_TYPE_IAX)
+ else if (idxd->data->type == IDXD_TYPE_IAX)
desc->iax_completion = &wq->iax_compls[i];
- desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
+ desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
desc->id = i;
desc->wq = wq;
desc->cpu = -1;
- dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
- desc->txd.tx_submit = idxd_dma_tx_submit;
}
return 0;
@@ -320,6 +314,19 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
struct device *dev = &wq->idxd->pdev->dev;
devm_iounmap(dev, wq->portal);
+ wq->portal = NULL;
+}
+
+void idxd_wqs_unmap_portal(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ if (wq->portal)
+ idxd_wq_unmap_portal(wq);
+ }
}
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
@@ -392,6 +399,32 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
memset(wq->name, 0, WQ_NAME_SIZE);
}
+static void idxd_wq_ref_release(struct percpu_ref *ref)
+{
+ struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);
+
+ complete(&wq->wq_dead);
+}
+
+int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
+{
+ int rc;
+
+ memset(&wq->wq_active, 0, sizeof(wq->wq_active));
+ rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+ reinit_completion(&wq->wq_dead);
+ return 0;
+}
+
+void idxd_wq_quiesce(struct idxd_wq *wq)
+{
+ percpu_ref_kill(&wq->wq_active);
+ wait_for_completion(&wq->wq_dead);
+ percpu_ref_exit(&wq->wq_active);
+}
+
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
@@ -432,13 +465,13 @@ int idxd_device_init_reset(struct idxd_device *idxd)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
IDXD_CMDSTS_ACTIVE)
cpu_relax();
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
return 0;
}
@@ -451,7 +484,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
- *status = IDXD_CMDSTS_HW_ERR;
+ if (status)
+ *status = IDXD_CMDSTS_HW_ERR;
return;
}
@@ -460,10 +494,10 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
- idxd->dev_lock);
+ idxd->cmd_lock);
dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
__func__, cmd_code, operand);
@@ -477,9 +511,9 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
wait_for_completion(&done);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
if (status) {
*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
idxd->cmd_status = *status & GENMASK(7, 0);
@@ -488,7 +522,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
}
int idxd_device_enable(struct idxd_device *idxd)
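
Splitting cmd_lock out of dev_lock narrows the command path to its own lock. The pattern idxd_cmd_exec implements, queue on a waitqueue until no command is in flight, kick the hardware, then sleep on a completion the interrupt handler signals, reduces to roughly the sketch below (hypothetical foo names, error handling elided):

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#define FOO_CMD		0x00
#define FOO_CMD_RUNNING	0

struct foo_dev {
	void __iomem *mmio;
	spinlock_t cmd_lock;
	wait_queue_head_t cmd_waitq;
	struct completion *cmd_done;
	unsigned long flags;
};

static void foo_cmd_exec(struct foo_dev *d, u32 cmd)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	spin_lock_irqsave(&d->cmd_lock, flags);
	/* Sleep (lock dropped meanwhile) until no command is in flight. */
	wait_event_lock_irq(d->cmd_waitq,
			    !test_bit(FOO_CMD_RUNNING, &d->flags), d->cmd_lock);
	set_bit(FOO_CMD_RUNNING, &d->flags);
	d->cmd_done = &done;
	writel(cmd, d->mmio + FOO_CMD);
	spin_unlock_irqrestore(&d->cmd_lock, flags);

	wait_for_completion(&done);	/* the ISR completes this */

	spin_lock_irqsave(&d->cmd_lock, flags);
	clear_bit(FOO_CMD_RUNNING, &d->flags);
	wake_up(&d->cmd_waitq);		/* let the next submitter in */
	spin_unlock_irqrestore(&d->cmd_lock, flags);
}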
@@ -521,7 +555,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
lockdep_assert_held(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
@@ -579,6 +613,77 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
dev_dbg(dev, "pasid %d drained\n", pasid);
}
+int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
+ enum idxd_interrupt_type irq_type)
+{
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand, status;
+
+ if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
+ return -EOPNOTSUPP;
+
+ dev_dbg(dev, "get int handle, idx %d\n", idx);
+
+ operand = idx & GENMASK(15, 0);
+ if (irq_type == IDXD_IRQ_IMS)
+ operand |= CMD_INT_HANDLE_IMS;
+
+ dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);
+
+ idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);
+
+ if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "request int handle failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
+
+ dev_dbg(dev, "int handle acquired: %u\n", *handle);
+ return 0;
+}
+
+int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
+ enum idxd_interrupt_type irq_type)
+{
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand, status;
+ union idxd_command_reg cmd;
+ unsigned long flags;
+
+ if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
+ return -EOPNOTSUPP;
+
+ dev_dbg(dev, "release int handle, handle %d\n", handle);
+
+ memset(&cmd, 0, sizeof(cmd));
+ operand = handle & GENMASK(15, 0);
+
+ if (irq_type == IDXD_IRQ_IMS)
+ operand |= CMD_INT_HANDLE_IMS;
+
+ cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
+ cmd.operand = operand;
+
+ dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
+
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
+ iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+ while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
+ cpu_relax();
+ status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+
+ if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "release int handle failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ dev_dbg(dev, "int handle released.\n");
+ return 0;
+}
+
/* Device configuration bits */
void idxd_msix_perm_setup(struct idxd_device *idxd)
{
@@ -660,7 +765,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
idxd_group_config_write(group);
}
@@ -739,7 +844,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
int i, rc;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
rc = idxd_wq_config_write(wq);
if (rc < 0)
@@ -755,7 +860,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
/* TC-A 0 and TC-B 1 should be defaults */
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
if (group->tc_a == -1)
group->tc_a = group->grpcfg.flags.tc_a = 0;
@@ -782,12 +887,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
struct idxd_group *group;
for (i = 0; i < idxd->max_groups; i++) {
- group = &idxd->groups[i];
+ group = idxd->groups[i];
group->grpcfg.engines = 0;
}
for (i = 0; i < idxd->max_engines; i++) {
- eng = &idxd->engines[i];
+ eng = idxd->engines[i];
group = eng->group;
if (!group)
@@ -811,13 +916,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
for (i = 0; i < idxd->max_groups; i++) {
- group = &idxd->groups[i];
+ group = idxd->groups[i];
for (j = 0; j < 4; j++)
group->grpcfg.wqs[j] = 0;
}
for (i = 0; i < idxd->max_wqs; i++) {
- wq = &idxd->wqs[i];
+ wq = idxd->wqs[i];
group = wq->group;
if (!wq->group)
@@ -865,3 +970,119 @@ int idxd_device_config(struct idxd_device *idxd)
return 0;
}
+
+static int idxd_wq_load_config(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int wqcfg_offset;
+ int i;
+
+ wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
+ memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
+
+ wq->size = wq->wqcfg->wq_size;
+ wq->threshold = wq->wqcfg->wq_thresh;
+ if (wq->wqcfg->priv)
+ wq->type = IDXD_WQT_KERNEL;
+
+ /* The driver does not support shared WQ mode in read-only config yet */
+ if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
+ return -EOPNOTSUPP;
+
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+
+ wq->priority = wq->wqcfg->priority;
+
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
+ }
+
+ return 0;
+}
+
+static void idxd_group_load_config(struct idxd_group *group)
+{
+ struct idxd_device *idxd = group->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i, j, grpcfg_offset;
+
+ /*
+ * Load WQS bit fields
+ * Iterate through all 256 bits, 64 bits at a time
+ */
+ for (i = 0; i < GRPWQCFG_STRIDES; i++) {
+ struct idxd_wq *wq;
+
+ grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
+ group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+ group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);
+
+ if (i * 64 >= idxd->max_wqs)
+ break;
+
+ /* Iterate through all 64 bits and check for wq set */
+ for (j = 0; j < 64; j++) {
+ int id = i * 64 + j;
+
+ /* No need to check beyond max wqs */
+ if (id >= idxd->max_wqs)
+ break;
+
+ /* Set group assignment for wq if wq bit is set */
+ if (group->grpcfg.wqs[i] & BIT(j)) {
+ wq = idxd->wqs[id];
+ wq->group = group;
+ }
+ }
+ }
+
+ grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
+ group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+ grpcfg_offset, group->grpcfg.engines);
+
+ /* Iterate through all 64 bits to check engines set */
+ for (i = 0; i < 64; i++) {
+ if (i >= idxd->max_engines)
+ break;
+
+ if (group->grpcfg.engines & BIT(i)) {
+ struct idxd_engine *engine = idxd->engines[i];
+
+ engine->group = group;
+ }
+ }
+
+ grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
+ group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->id, grpcfg_offset, group->grpcfg.flags.bits);
+}
+
+int idxd_device_load_config(struct idxd_device *idxd)
+{
+ union gencfg_reg reg;
+ int i, rc;
+
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ idxd->token_limit = reg.token_limit;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = idxd->groups[i];
+
+ idxd_group_load_config(group);
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ rc = idxd_wq_load_config(wq);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
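
Worth calling out from the hunks above: idxd_wq_init_percpu_ref()/idxd_wq_quiesce() is the stock percpu_ref drain idiom, kill the ref so no new users can grab it, wait for the release callback to fire the completion, then exit the ref. Reduced to its skeleton (hypothetical foo names):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct foo_wq {
	struct percpu_ref active;
	struct completion dead;
};

static void foo_wq_ref_release(struct percpu_ref *ref)
{
	struct foo_wq *wq = container_of(ref, struct foo_wq, active);

	complete(&wq->dead);	/* last reference just dropped */
}

static int foo_wq_up(struct foo_wq *wq)
{
	init_completion(&wq->dead);
	return percpu_ref_init(&wq->active, foo_wq_ref_release, 0, GFP_KERNEL);
}

static void foo_wq_quiesce(struct foo_wq *wq)
{
	percpu_ref_kill(&wq->active);	/* no new percpu_ref_tryget_live() */
	wait_for_completion(&wq->dead);	/* in-flight users drain out */
	percpu_ref_exit(&wq->active);
}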
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index a15e50126434..77439b645044 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -14,7 +14,10 @@
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
- return container_of(c, struct idxd_wq, dma_chan);
+ struct idxd_dma_chan *idxd_chan;
+
+ idxd_chan = container_of(c, struct idxd_dma_chan, chan);
+ return idxd_chan->wq;
}
void idxd_dma_complete_txd(struct idxd_desc *desc,
@@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct dma_chan *c = tx->chan;
struct idxd_wq *wq = to_idxd_wq(c);
@@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static void idxd_dma_release(struct dma_device *device)
{
+ struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
+
+ kfree(idxd_dma);
}
int idxd_register_dma_device(struct idxd_device *idxd)
{
- struct dma_device *dma = &idxd->dma_dev;
+ struct idxd_dma_dev *idxd_dma;
+ struct dma_device *dma;
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
+ if (!idxd_dma)
+ return -ENOMEM;
+
+ dma = &idxd_dma->dma;
INIT_LIST_HEAD(&dma->channels);
- dma->dev = &idxd->pdev->dev;
+ dma->dev = dev;
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
@@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
dma->device_free_chan_resources = idxd_dma_free_chan_resources;
- return dma_async_device_register(&idxd->dma_dev);
+ rc = dma_async_device_register(dma);
+ if (rc < 0) {
+ kfree(idxd_dma);
+ return rc;
+ }
+
+ idxd_dma->idxd = idxd;
+ /*
+ * This pointer is protected by the refs taken by the dma_chan. It will remain valid
+ * as long as there are outstanding channels.
+ */
+ idxd->idxd_dma = idxd_dma;
+ return 0;
}
void idxd_unregister_dma_device(struct idxd_device *idxd)
{
- dma_async_device_unregister(&idxd->dma_dev);
+ dma_async_device_unregister(&idxd->idxd_dma->dma);
}
int idxd_register_dma_channel(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct dma_device *dma = &idxd->dma_dev;
- struct dma_chan *chan = &wq->dma_chan;
- int rc;
+ struct dma_device *dma = &idxd->idxd_dma->dma;
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_dma_chan *idxd_chan;
+ struct dma_chan *chan;
+ int rc, i;
+
+ idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
+ if (!idxd_chan)
+ return -ENOMEM;
- memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+ chan = &idxd_chan->chan;
chan->device = dma;
list_add_tail(&chan->device_node, &dma->channels);
+
+ for (i = 0; i < wq->num_descs; i++) {
+ struct idxd_desc *desc = wq->descs[i];
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = idxd_dma_tx_submit;
+ }
+
rc = dma_async_device_channel_register(dma, chan);
- if (rc < 0)
+ if (rc < 0) {
+ kfree(idxd_chan);
return rc;
+ }
+
+ wq->idxd_chan = idxd_chan;
+ idxd_chan->wq = wq;
+ get_device(&wq->conf_dev);
return 0;
}
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
- struct dma_chan *chan = &wq->dma_chan;
+ struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
+ struct dma_chan *chan = &idxd_chan->chan;
+ struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
- dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+ dma_async_device_channel_unregister(&idxd_dma->dma, chan);
list_del(&chan->device_node);
+ kfree(wq->idxd_chan);
+ wq->idxd_chan = NULL;
+ put_device(&wq->conf_dev);
}
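
The dma.c changes all follow from one move: struct dma_chan stops being embedded in struct idxd_wq and instead lives in a small heap-allocated wrapper, so to_idxd_wq() becomes a container_of() plus a back-pointer. The wrapper-and-recover idiom in isolation (hypothetical foo names):

#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct foo_wq;

struct foo_chan {
	struct dma_chan chan;	/* embedded, handed to dmaengine */
	struct foo_wq *wq;	/* back-pointer to the owning object */
};

static inline struct foo_wq *to_foo_wq(struct dma_chan *c)
{
	struct foo_chan *fc = container_of(c, struct foo_chan, chan);

	return fc->wq;
}

Heap-allocating the wrapper, together with the get_device() on the wq's conf_dev at registration time, lets a registered channel pin its work queue until dmaengine drops the last channel reference.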
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 76014c14f473..26482c7d4c3a 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -8,12 +8,18 @@
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
+struct idxd_device;
+struct idxd_wq;
+
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
@@ -25,6 +31,7 @@ enum idxd_type {
};
#define IDXD_NAME_SIZE 128
+#define IDXD_PMU_EVENT_MAX 64
struct idxd_device_driver {
struct device_driver drv;
@@ -33,6 +40,7 @@ struct idxd_device_driver {
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
+ int vector;
struct llist_head pending_llist;
struct list_head work_list;
/*
@@ -56,6 +64,31 @@ struct idxd_group {
int tc_b;
};
+struct idxd_pmu {
+ struct idxd_device *idxd;
+
+ struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
+ int n_events;
+
+ DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);
+
+ struct pmu pmu;
+ char name[IDXD_NAME_SIZE];
+ int cpu;
+
+ int n_counters;
+ int counter_width;
+ int n_event_categories;
+
+ bool per_counter_caps_supported;
+ unsigned long supported_event_categories;
+
+ unsigned long supported_filters;
+ int n_filters;
+
+ struct hlist_node cpuhp_node;
+};
+
#define IDXD_MAX_PRIORITY 0xf
enum idxd_wq_state {
@@ -75,10 +108,10 @@ enum idxd_wq_type {
};
struct idxd_cdev {
+ struct idxd_wq *wq;
struct cdev cdev;
- struct device *dev;
+ struct device dev;
int minor;
- struct wait_queue_head err_queue;
};
#define IDXD_ALLOCATED_BATCH_SIZE 128U
@@ -96,10 +129,18 @@ enum idxd_complete_type {
IDXD_COMPLETE_DEV_FAIL,
};
+struct idxd_dma_chan {
+ struct dma_chan chan;
+ struct idxd_wq *wq;
+};
+
struct idxd_wq {
void __iomem *portal;
+ struct percpu_ref wq_active;
+ struct completion wq_dead;
struct device conf_dev;
- struct idxd_cdev idxd_cdev;
+ struct idxd_cdev *idxd_cdev;
+ struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
enum idxd_wq_type type;
@@ -125,7 +166,7 @@ struct idxd_wq {
int compls_size;
struct idxd_desc **descs;
struct sbitmap_queue sbq;
- struct dma_chan dma_chan;
+ struct idxd_dma_chan *idxd_chan;
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
@@ -147,6 +188,7 @@ struct idxd_hw {
union group_cap_reg group_cap;
union engine_cap_reg engine_cap;
struct opcap opcap;
+ u32 cmd_cap;
};
enum idxd_device_state {
@@ -162,9 +204,22 @@ enum idxd_device_flag {
IDXD_FLAG_PASID_ENABLED,
};
-struct idxd_device {
+struct idxd_dma_dev {
+ struct idxd_device *idxd;
+ struct dma_device dma;
+};
+
+struct idxd_driver_data {
+ const char *name_prefix;
enum idxd_type type;
+ struct device_type *dev_type;
+ int compl_size;
+ int align;
+};
+
+struct idxd_device {
struct device conf_dev;
+ struct idxd_driver_data *data;
struct list_head list;
struct idxd_hw hw;
enum idxd_device_state state;
@@ -177,10 +232,11 @@ struct idxd_device {
void __iomem *reg_base;
spinlock_t dev_lock; /* spinlock for device */
+ spinlock_t cmd_lock; /* spinlock for device commands */
struct completion *cmd_done;
- struct idxd_group *groups;
- struct idxd_wq *wqs;
- struct idxd_engine *engines;
+ struct idxd_group **groups;
+ struct idxd_wq **wqs;
+ struct idxd_engine **engines;
struct iommu_sva *sva;
unsigned int pasid;
@@ -202,17 +258,19 @@ struct idxd_device {
int token_limit;
int nr_tokens; /* non-reserved tokens */
unsigned int wqcfg_size;
- int compl_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
- struct msix_entry *msix_entries;
int num_wq_irqs;
struct idxd_irq_entry *irq_entries;
- struct dma_device dma_dev;
+ struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
struct work_struct work;
+
+ int *int_handles;
+
+ struct idxd_pmu *idxd_pmu;
};
/* IDXD software descriptor */
@@ -232,6 +290,7 @@ struct idxd_desc {
struct list_head list;
int id;
int cpu;
+ unsigned int vector;
struct idxd_wq *wq;
};
@@ -242,6 +301,44 @@ extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
+extern struct ida idxd_ida;
+extern struct device_type dsa_device_type;
+extern struct device_type iax_device_type;
+extern struct device_type idxd_wq_device_type;
+extern struct device_type idxd_engine_device_type;
+extern struct device_type idxd_group_device_type;
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return dev->type == &dsa_device_type;
+}
+
+static inline bool is_iax_dev(struct device *dev)
+{
+ return dev->type == &iax_device_type;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev) || is_iax_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return dev->type == &idxd_wq_device_type;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_USER;
+}
static inline bool wq_dedicated(struct idxd_wq *wq)
{
@@ -268,6 +365,11 @@ enum idxd_portal_prot {
IDXD_PORTAL_LIMITED,
};
+enum idxd_interrupt_type {
+ IDXD_IRQ_MSIX = 0,
+ IDXD_IRQ_IMS,
+};
+
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
return prot * 0x1000;
@@ -279,18 +381,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
-static inline void idxd_set_type(struct idxd_device *idxd)
-{
- struct pci_dev *pdev = idxd->pdev;
-
- if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
- idxd->type = IDXD_TYPE_DSA;
- else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
- idxd->type = IDXD_TYPE_IAX;
- else
- idxd->type = IDXD_TYPE_UNKNOWN;
-}
-
static inline void idxd_wq_get(struct idxd_wq *wq)
{
wq->client_count++;
@@ -306,19 +396,17 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
-const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
-int idxd_setup_sysfs(struct idxd_device *idxd);
-void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_devices(struct idxd_device *idxd);
+void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
-struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+void idxd_wqs_quiesce(struct idxd_device *idxd);
/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
-irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
@@ -336,8 +424,14 @@ void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
+int idxd_device_load_config(struct idxd_device *idxd);
+int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
+ enum idxd_interrupt_type irq_type);
+int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
+ enum idxd_interrupt_type irq_type);
/* work queue control */
+void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
@@ -349,6 +443,8 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
+void idxd_wq_quiesce(struct idxd_wq *wq);
+int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
@@ -363,7 +459,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type);
-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
/* cdev */
int idxd_cdev_register(void);
@@ -372,4 +467,19 @@ int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
+/* perfmon */
+#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
+int perfmon_pmu_init(struct idxd_device *idxd);
+void perfmon_pmu_remove(struct idxd_device *idxd);
+void perfmon_counter_overflow(struct idxd_device *idxd);
+void perfmon_init(void);
+void perfmon_exit(void);
+#else
+static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
+static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
+static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
+static inline void perfmon_init(void) {}
+static inline void perfmon_exit(void) {}
+#endif
+
#endif
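
The is_dsa_dev()/is_iax_dev()/is_idxd_wq_dev() helpers key off dev->type, the usual way a bus driver distinguishes the different device flavors it registers on one bus. As an illustration only, a bus walk that counts wq devices might use the driver-core bus_for_each_dev() helper like this (foo names hypothetical):

#include <linux/device.h>
#include "idxd.h"

static int foo_count_wqs(struct device *dev, void *data)
{
	int *count = data;

	if (is_idxd_wq_dev(dev))
		(*count)++;
	return 0;
}

static int foo_nr_wqs_on_bus(void)
{
	int count = 0;

	bus_for_each_dev(&dsa_bus_type, NULL, &count, foo_count_wqs);
	return count;
}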
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 6584b0ec07d5..2a926bef87f2 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -21,6 +21,7 @@
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
+#include "perfmon.h"
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
@@ -33,35 +34,39 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
#define DRV_NAME "idxd"
bool support_enqcmd;
-
-static struct idr idxd_idrs[IDXD_TYPE_MAX];
-static DEFINE_MUTEX(idxd_idr_lock);
+DEFINE_IDA(idxd_ida);
+
+static struct idxd_driver_data idxd_driver_data[] = {
+ [IDXD_TYPE_DSA] = {
+ .name_prefix = "dsa",
+ .type = IDXD_TYPE_DSA,
+ .compl_size = sizeof(struct dsa_completion_record),
+ .align = 32,
+ .dev_type = &dsa_device_type,
+ },
+ [IDXD_TYPE_IAX] = {
+ .name_prefix = "iax",
+ .type = IDXD_TYPE_IAX,
+ .compl_size = sizeof(struct iax_completion_record),
+ .align = 64,
+ .dev_type = &iax_device_type,
+ },
+};
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+ { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
/* IAX ver 1.0 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
+ { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
-static char *idxd_name[] = {
- "dsa",
- "iax"
-};
-
-const char *idxd_get_dev_name(struct idxd_device *idxd)
-{
- return idxd_name[idxd->type];
-}
-
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
- struct msix_entry *msix;
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
@@ -69,23 +74,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
dev_err(dev, "Not MSI-X interrupt capable.\n");
- goto err_no_irq;
+ return -ENOSPC;
}
- idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
- msixcnt, GFP_KERNEL);
- if (!idxd->msix_entries) {
- rc = -ENOMEM;
- goto err_no_irq;
- }
-
- for (i = 0; i < msixcnt; i++)
- idxd->msix_entries[i].entry = i;
-
- rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
- if (rc) {
- dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
- goto err_no_irq;
+ rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
+ if (rc != msixcnt) {
+ dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
+ return -ENOSPC;
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
@@ -93,119 +88,266 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
* We implement 1 completion list per MSI-X entry except for
* entry 0, which is for errors and others.
*/
- idxd->irq_entries = devm_kcalloc(dev, msixcnt,
- sizeof(struct idxd_irq_entry),
- GFP_KERNEL);
+ idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
+ GFP_KERNEL, dev_to_node(dev));
if (!idxd->irq_entries) {
rc = -ENOMEM;
- goto err_no_irq;
+ goto err_irq_entries;
}
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
+ idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
spin_lock_init(&idxd->irq_entries[i].list_lock);
}
- msix = &idxd->msix_entries[0];
irq_entry = &idxd->irq_entries[0];
- rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
- idxd_misc_thread, 0, "idxd-misc",
- irq_entry);
+ rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
+ 0, "idxd-misc", irq_entry);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
- goto err_no_irq;
+ goto err_misc_irq;
}
- dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
- msix->vector);
+ dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
/* first MSI-X entry is not for wq interrupts */
idxd->num_wq_irqs = msixcnt - 1;
for (i = 1; i < msixcnt; i++) {
- msix = &idxd->msix_entries[i];
irq_entry = &idxd->irq_entries[i];
init_llist_head(&idxd->irq_entries[i].pending_llist);
INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
- rc = devm_request_threaded_irq(dev, msix->vector,
- idxd_irq_handler,
- idxd_wq_thread, 0,
- "idxd-portal", irq_entry);
+ rc = request_threaded_irq(irq_entry->vector, NULL,
+ idxd_wq_thread, 0, "idxd-portal", irq_entry);
if (rc < 0) {
- dev_err(dev, "Failed to allocate irq %d.\n",
- msix->vector);
- goto err_no_irq;
+ dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
+ goto err_wq_irqs;
+ }
+
+ dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
+ /*
+ * The MSIX vector enumeration starts at 1 with vector 0 being the
+ * misc interrupt that handles non-I/O completion events. The
+ * interrupt handles are for IMS enumeration on guest. The misc
+ * interrupt vector does not require a handle and therefore we start
+ * the int_handles at index 0. Since 'i' starts at 1, the first
+ * int_handles index will be 0.
+ */
+ rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
+ IDXD_IRQ_MSIX);
+ if (rc < 0) {
+ free_irq(irq_entry->vector, irq_entry);
+ goto err_wq_irqs;
+ }
+ dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
}
- dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
- i, msix->vector);
}
idxd_unmask_error_interrupts(idxd);
idxd_msix_perm_setup(idxd);
return 0;
- err_no_irq:
+ err_wq_irqs:
+ while (--i >= 0) {
+ irq_entry = &idxd->irq_entries[i];
+ free_irq(irq_entry->vector, irq_entry);
+ if (i != 0)
+ idxd_device_release_int_handle(idxd,
+ idxd->int_handles[i], IDXD_IRQ_MSIX);
+ }
+ err_misc_irq:
/* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
- pci_disable_msix(pdev);
+ err_irq_entries:
+ pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
}
-static int idxd_setup_internals(struct idxd_device *idxd)
+static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int i;
+ struct idxd_wq *wq;
+ int i, rc;
- init_waitqueue_head(&idxd->cmd_waitq);
- idxd->groups = devm_kcalloc(dev, idxd->max_groups,
- sizeof(struct idxd_group), GFP_KERNEL);
- if (!idxd->groups)
- return -ENOMEM;
-
- for (i = 0; i < idxd->max_groups; i++) {
- idxd->groups[i].idxd = idxd;
- idxd->groups[i].id = i;
- idxd->groups[i].tc_a = -1;
- idxd->groups[i].tc_b = -1;
- }
-
- idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
- GFP_KERNEL);
+ idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
if (!idxd->wqs)
return -ENOMEM;
- idxd->engines = devm_kcalloc(dev, idxd->max_engines,
- sizeof(struct idxd_engine), GFP_KERNEL);
- if (!idxd->engines)
- return -ENOMEM;
-
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+ goto err;
+ }
wq->id = i;
wq->idxd = idxd;
+ device_initialize(&wq->conf_dev);
+ wq->conf_dev.parent = &idxd->conf_dev;
+ wq->conf_dev.bus = &dsa_bus_type;
+ wq->conf_dev.type = &idxd_wq_device_type;
+ rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto err;
+ }
+
mutex_init(&wq->wq_lock);
- wq->idxd_cdev.minor = -1;
+ init_waitqueue_head(&wq->err_queue);
+ init_completion(&wq->wq_dead);
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
- wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
- if (!wq->wqcfg)
- return -ENOMEM;
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+ if (!wq->wqcfg) {
+ put_device(&wq->conf_dev);
+ rc = -ENOMEM;
+ goto err;
+ }
+ idxd->wqs[i] = wq;
}
+ return 0;
+
+ err:
+ while (--i >= 0)
+ put_device(&idxd->wqs[i]->conf_dev);
+ return rc;
+}
+
+static int idxd_setup_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->engines)
+ return -ENOMEM;
+
for (i = 0; i < idxd->max_engines; i++) {
- idxd->engines[i].idxd = idxd;
- idxd->engines[i].id = i;
+ engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
+ if (!engine) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ engine->id = i;
+ engine->idxd = idxd;
+ device_initialize(&engine->conf_dev);
+ engine->conf_dev.parent = &idxd->conf_dev;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto err;
+ }
+
+ idxd->engines[i] = engine;
}
- idxd->wq = create_workqueue(dev_name(dev));
- if (!idxd->wq)
+ return 0;
+
+ err:
+ while (--i >= 0)
+ put_device(&idxd->engines[i]->conf_dev);
+ return rc;
+}
+
+static int idxd_setup_groups(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_group *group;
+ int i, rc;
+
+ idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->groups)
return -ENOMEM;
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
+ if (!group) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ group->id = i;
+ group->idxd = idxd;
+ device_initialize(&group->conf_dev);
+ group->conf_dev.parent = &idxd->conf_dev;
+ group->conf_dev.bus = &dsa_bus_type;
+ group->conf_dev.type = &idxd_group_device_type;
+ rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto err;
+ }
+
+ idxd->groups[i] = group;
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
+
+ return 0;
+
+ err:
+ while (--i >= 0)
+ put_device(&idxd->groups[i]->conf_dev);
+ return rc;
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc, i;
+
+ init_waitqueue_head(&idxd->cmd_waitq);
+
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
+ idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL);
+ if (!idxd->int_handles)
+ return -ENOMEM;
+ }
+
+ rc = idxd_setup_wqs(idxd);
+ if (rc < 0)
+ goto err_wqs;
+
+ rc = idxd_setup_engines(idxd);
+ if (rc < 0)
+ goto err_engine;
+
+ rc = idxd_setup_groups(idxd);
+ if (rc < 0)
+ goto err_group;
+
+ idxd->wq = create_workqueue(dev_name(dev));
+ if (!idxd->wq) {
+ rc = -ENOMEM;
+ goto err_wkq_create;
+ }
+
return 0;
+
+ err_wkq_create:
+ for (i = 0; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
+ err_group:
+ for (i = 0; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
+ err_engine:
+ for (i = 0; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
+ err_wqs:
+ kfree(idxd->int_handles);
+ return rc;
}
static void idxd_read_table_offsets(struct idxd_device *idxd)
@@ -233,6 +375,12 @@ static void idxd_read_caps(struct idxd_device *idxd)
/* reading generic capabilities */
idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+
+ if (idxd->hw.gen_cap.cmd_cap) {
+ idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
+ dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
+ }
+
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
@@ -275,17 +423,34 @@ static void idxd_read_caps(struct idxd_device *idxd)
}
}
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
+ int rc;
- idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
if (!idxd)
return NULL;
idxd->pdev = pdev;
+ idxd->data = data;
+ idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
+ if (idxd->id < 0)
+ return NULL;
+
+ device_initialize(&idxd->conf_dev);
+ idxd->conf_dev.parent = dev;
+ idxd->conf_dev.bus = &dsa_bus_type;
+ idxd->conf_dev.type = idxd->data->dev_type;
+ rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return NULL;
+ }
+
spin_lock_init(&idxd->dev_lock);
+ spin_lock_init(&idxd->cmd_lock);
return idxd;
}
@@ -338,11 +503,18 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- rc = idxd_enable_system_pasid(idxd);
- if (rc < 0)
- dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ if (rc == 0) {
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc < 0) {
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
+ } else {
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ }
+ } else {
+ dev_warn(dev, "Unable to turn on SVA feature.\n");
+ }
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -352,80 +524,75 @@ static int idxd_probe(struct idxd_device *idxd)
rc = idxd_setup_internals(idxd);
if (rc)
- goto err_setup;
+ goto err;
+
+ /* If the configs are readonly, then load them from device */
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ dev_dbg(dev, "Loading RO device config\n");
+ rc = idxd_device_load_config(idxd);
+ if (rc < 0)
+ goto err;
+ }
rc = idxd_setup_interrupts(idxd);
if (rc)
- goto err_setup;
+ goto err;
dev_dbg(dev, "IDXD interrupt setup complete.\n");
- mutex_lock(&idxd_idr_lock);
- idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
- mutex_unlock(&idxd_idr_lock);
- if (idxd->id < 0) {
- rc = -ENOMEM;
- goto err_idr_fail;
- }
-
idxd->major = idxd_cdev_get_major(idxd);
+ rc = perfmon_pmu_init(idxd);
+ if (rc < 0)
+ dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);
+
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
- err_idr_fail:
- idxd_mask_error_interrupts(idxd);
- idxd_mask_msix_vectors(idxd);
- err_setup:
+ err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
return rc;
}
-static void idxd_type_init(struct idxd_device *idxd)
-{
- if (idxd->type == IDXD_TYPE_DSA)
- idxd->compl_size = sizeof(struct dsa_completion_record);
- else if (idxd->type == IDXD_TYPE_IAX)
- idxd->compl_size = sizeof(struct iax_completion_record);
-}
-
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
+ struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
int rc;
- rc = pcim_enable_device(pdev);
+ rc = pci_enable_device(pdev);
if (rc)
return rc;
dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev);
- if (!idxd)
- return -ENOMEM;
+ idxd = idxd_alloc(pdev, data);
+ if (!idxd) {
+ rc = -ENOMEM;
+ goto err_idxd_alloc;
+ }
dev_dbg(dev, "Mapping BARs\n");
- idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base)
- return -ENOMEM;
+ idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
dev_dbg(dev, "Set DMA masks\n");
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
- return rc;
+ goto err;
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
- return rc;
-
- idxd_set_type(idxd);
-
- idxd_type_init(idxd);
+ goto err;
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
@@ -435,13 +602,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_probe(idxd);
if (rc) {
dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- return -ENODEV;
+ goto err;
}
- rc = idxd_setup_sysfs(idxd);
+ rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- return -ENODEV;
+ goto err;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -450,6 +617,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idxd->hw.version);
return 0;
+
+ err:
+ pci_iounmap(pdev, idxd->reg_base);
+ err_iomap:
+ put_device(&idxd->conf_dev);
+ err_idxd_alloc:
+ pci_disable_device(pdev);
+ return rc;
}
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
@@ -478,6 +653,36 @@ static void idxd_flush_work_list(struct idxd_irq_entry *ie)
}
}
+void idxd_wqs_quiesce(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
+ idxd_wq_quiesce(wq);
+ }
+}
+
+static void idxd_release_int_handles(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->num_wq_irqs; i++) {
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
+ rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+ IDXD_IRQ_MSIX);
+ if (rc < 0)
+ dev_warn(dev, "irq handle %d release failed\n",
+ idxd->int_handles[i]);
+ else
+ dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]);
+ }
+ }
+}
+
static void idxd_shutdown(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
@@ -495,7 +700,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
- synchronize_irq(idxd->msix_entries[i].vector);
+ synchronize_irq(irq_entry->vector);
+ free_irq(irq_entry->vector, irq_entry);
if (i == 0)
continue;
idxd_flush_pending_llist(irq_entry);
@@ -503,6 +709,10 @@ static void idxd_shutdown(struct pci_dev *pdev)
}
idxd_msix_perm_clear(idxd);
+ idxd_release_int_handles(idxd);
+ pci_free_irq_vectors(pdev);
+ pci_iounmap(pdev, idxd->reg_base);
+ pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
}
@@ -511,13 +721,12 @@ static void idxd_remove(struct pci_dev *pdev)
struct idxd_device *idxd = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s called\n", __func__);
- idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- mutex_lock(&idxd_idr_lock);
- idr_remove(&idxd_idrs[idxd->type], idxd->id);
- mutex_unlock(&idxd_idr_lock);
+ idxd_unregister_devices(idxd);
+ perfmon_pmu_remove(idxd);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
}
static struct pci_driver idxd_pci_driver = {
@@ -530,7 +739,7 @@ static struct pci_driver idxd_pci_driver = {
static int __init idxd_init_module(void)
{
- int err, i;
+ int err;
/*
* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
@@ -546,8 +755,7 @@ static int __init idxd_init_module(void)
else
support_enqcmd = true;
- for (i = 0; i < IDXD_TYPE_MAX; i++)
- idr_init(&idxd_idrs[i]);
+ perfmon_init();
err = idxd_register_bus_type();
if (err < 0)
@@ -582,5 +790,6 @@ static void __exit idxd_exit_module(void)
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
+ perfmon_exit();
}
module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index f1463fc58112..ae68e1e5487a 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
goto out;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
rc = idxd_wq_enable(wq);
@@ -102,15 +102,6 @@ static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
return 0;
}
-irqreturn_t idxd_irq_handler(int vec, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
-
- idxd_mask_msix_vector(idxd, irq_entry->id);
- return IRQ_WAKE_THREAD;
-}
-
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
struct device *dev = &idxd->pdev->dev;
@@ -130,18 +121,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;
- struct idxd_wq *wq = &idxd->wqs[id];
+ struct idxd_wq *wq = idxd->wqs[id];
if (wq->type == IDXD_WQT_USER)
- wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ wake_up_interruptible(&wq->err_queue);
} else {
int i;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->type == IDXD_WQT_USER)
- wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ wake_up_interruptible(&wq->err_queue);
}
}
@@ -165,11 +156,8 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
}
if (cause & IDXD_INTC_PERFMON_OVFL) {
- /*
- * Driver does not utilize perfmon counter overflow interrupt
- * yet.
- */
val |= IDXD_INTC_PERFMON_OVFL;
+ perfmon_counter_overflow(idxd);
}
val ^= cause;
@@ -202,6 +190,8 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
queue_work(idxd->wq, &idxd->work);
} else {
spin_lock_bh(&idxd->dev_lock);
+ idxd_wqs_quiesce(idxd);
+ idxd_wqs_unmap_portal(idxd);
idxd_device_wqs_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
@@ -235,7 +225,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
}
- idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
@@ -392,8 +381,6 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
int processed;
processed = idxd_desc_process(irq_entry);
- idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
-
if (processed == 0)
return IRQ_NONE;
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
new file mode 100644
index 000000000000..d73004f47cf4
--- /dev/null
+++ b/drivers/dma/idxd/perfmon.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */
+
+#include <linux/sched/task.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "idxd.h"
+#include "perfmon.h"
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
+static cpumask_t perfmon_dsa_cpu_mask;
+static bool cpuhp_set_up;
+static enum cpuhp_state cpuhp_slot;
+
+/*
+ * perf userspace reads this attribute to determine which cpus to open
+ * counters on. It's connected to perfmon_dsa_cpu_mask, which is
+ * maintained by the cpu hotplug handlers.
+ */
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *perfmon_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+ .attrs = perfmon_cpumask_attrs,
+};
+
+/*
+ * These attributes specify the bits in the config word that the perf
+ * syscall uses to pass the event ids and categories to perfmon.
+ */
+DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3");
+DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31");
+
+/*
+ * These attributes specify the bits in the config1 word that the perf
+ * syscall uses to pass filter data to perfmon.
+ */
+DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31");
+DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39");
+DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43");
+DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51");
+DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59");
+
+#define PERFMON_FILTERS_START 2
+#define PERFMON_FILTERS_MAX 5
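+
+/*
+ * Example usage (illustrative only - the actual event category and
+ * encoding values are device-specific):
+ *
+ *   perf stat -e dsa0/event_category=0x1,event=0x1,filter_wq=0x1/
+ */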
+
+static struct attribute *perfmon_format_attrs[] = {
+ &format_attr_idxd_event_category.attr,
+ &format_attr_idxd_event.attr,
+ &format_attr_idxd_filter_wq.attr,
+ &format_attr_idxd_filter_tc.attr,
+ &format_attr_idxd_filter_pgsz.attr,
+ &format_attr_idxd_filter_sz.attr,
+ &format_attr_idxd_filter_eng.attr,
+ NULL,
+};
+
+static struct attribute_group perfmon_format_attr_group = {
+ .name = "format",
+ .attrs = perfmon_format_attrs,
+};
+
+static const struct attribute_group *perfmon_attr_groups[] = {
+ &perfmon_format_attr_group,
+ &cpumask_attr_group,
+ NULL,
+};
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
+}
+
+static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
+{
+ return &idxd_pmu->pmu == event->pmu;
+}
+
+static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
+ struct perf_event *leader,
+ bool do_grp)
+{
+ struct perf_event *event;
+ int n, max_count;
+
+ max_count = idxd_pmu->n_counters;
+ n = idxd_pmu->n_events;
+
+ if (n >= max_count)
+ return -EINVAL;
+
+ if (is_idxd_event(idxd_pmu, leader)) {
+ idxd_pmu->event_list[n] = leader;
+ idxd_pmu->event_list[n]->hw.idx = n;
+ n++;
+ }
+
+ if (!do_grp)
+ return n;
+
+ for_each_sibling_event(event, leader) {
+ if (!is_idxd_event(idxd_pmu, event) ||
+ event->state <= PERF_EVENT_STATE_OFF)
+ continue;
+
+ if (n >= max_count)
+ return -EINVAL;
+
+ idxd_pmu->event_list[n] = event;
+ idxd_pmu->event_list[n]->hw.idx = n;
+ n++;
+ }
+
+ return n;
+}
+
+static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
+ struct perf_event *event, int idx)
+{
+ struct idxd_device *idxd = idxd_pmu->idxd;
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->idx = idx;
+ hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
+ hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
+}
+
+static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
+ struct perf_event *event)
+{
+ int i;
+
+ for (i = 0; i < IDXD_PMU_EVENT_MAX; i++)
+ if (!test_and_set_bit(i, idxd_pmu->used_mask))
+ return i;
+
+ return -EINVAL;
+}
+
+/*
+ * Check whether there are enough counters to satisfy that all the
+ * events in the group can actually be scheduled at the same time.
+ *
+ * To do this, create a fake idxd_pmu object so the event collection
+ * and assignment functions can be used without affecting the internal
+ * state of the real idxd_pmu object.
+ */
+static int perfmon_validate_group(struct idxd_pmu *pmu,
+ struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct idxd_pmu *fake_pmu;
+ int i, ret = 0, n, idx;
+
+ fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL);
+ if (!fake_pmu)
+ return -ENOMEM;
+
+ fake_pmu->pmu.name = pmu->pmu.name;
+ fake_pmu->n_counters = pmu->n_counters;
+
+ n = perfmon_collect_events(fake_pmu, leader, true);
+ if (n < 0) {
+ ret = n;
+ goto out;
+ }
+
+ fake_pmu->n_events = n;
+ n = perfmon_collect_events(fake_pmu, event, false);
+ if (n < 0) {
+ ret = n;
+ goto out;
+ }
+
+ fake_pmu->n_events = n;
+
+ for (i = 0; i < n; i++) {
+ event = fake_pmu->event_list[i];
+
+ idx = perfmon_assign_event(fake_pmu, event);
+ if (idx < 0) {
+ ret = idx;
+ goto out;
+ }
+ }
+out:
+ kfree(fake_pmu);
+
+ return ret;
+}
+
+static int perfmon_pmu_event_init(struct perf_event *event)
+{
+ struct idxd_device *idxd;
+ int ret = 0;
+
+ idxd = event_to_idxd(event);
+ event->hw.idx = -1;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* sampling not supported */
+ if (event->attr.sample_period)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->pmu != &idxd->idxd_pmu->pmu)
+ return -EINVAL;
+
+ event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
+ event->cpu = idxd->idxd_pmu->cpu;
+ event->hw.config = event->attr.config;
+
+ if (event->group_leader != event)
+ /* non-group events have themselves as leader */
+ ret = perfmon_validate_group(idxd->idxd_pmu, event);
+
+ return ret;
+}
+
+static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct idxd_device *idxd;
+ int cntr = hwc->idx;
+
+ idxd = event_to_idxd(event);
+
+ return ioread64(CNTRDATA_REG(idxd, cntr));
+}
+
+static void perfmon_pmu_event_update(struct perf_event *event)
+{
+ struct idxd_device *idxd = event_to_idxd(event);
+ u64 prev_raw_count, new_raw_count, delta, p, n;
+ int shift = 64 - idxd->idxd_pmu->counter_width;
+ struct hw_perf_event *hwc = &event->hw;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = perfmon_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
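+ /*
+ * Shift both raw values up so the counter's top bit sits at bit 63,
+ * subtract, then shift back down; this yields the event delta even
+ * when the raw counter wrapped. For example, with counter_width == 32
+ * (shift == 32), prev == 0xfffffffe and new == 0x1 gives delta == 3.
+ */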
+ n = (new_raw_count << shift);
+ p = (prev_raw_count << shift);
+
+ delta = ((n - p) >> shift);
+
+ local64_add(delta, &event->count);
+}
+
+void perfmon_counter_overflow(struct idxd_device *idxd)
+{
+ int i, n_counters, max_loop = OVERFLOW_SIZE;
+ struct perf_event *event;
+ unsigned long ovfstatus;
+
+ n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
+
+ ovfstatus = ioread32(OVFSTATUS_REG(idxd));
+
+ /*
+ * While updating overflowed counters, other counters behind
+ * them could overflow and be missed in a given pass.
+ * Normally this could happen at most n_counters times, but in
+ * theory a tiny counter width could result in continual
+ * overflows and endless looping. max_loop provides a
+ * failsafe in that highly unlikely case.
+ */
+ while (ovfstatus && max_loop--) {
+ /* Figure out which counter(s) overflowed */
+ for_each_set_bit(i, &ovfstatus, n_counters) {
+ unsigned long ovfstatus_clear = 0;
+
+ /* Update event->count for overflowed counter */
+ event = idxd->idxd_pmu->event_list[i];
+ perfmon_pmu_event_update(event);
+ /* Writing 1 to OVFSTATUS bit clears it */
+ set_bit(i, &ovfstatus_clear);
+ iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
+ }
+
+ ovfstatus = ioread32(OVFSTATUS_REG(idxd));
+ }
+
+ /*
+ * This should never happen; if it does, one or more counters
+ * wrapped around twice while this handler was running.
+ */
+ WARN_ON_ONCE(ovfstatus);
+}
+
+static inline void perfmon_reset_config(struct idxd_device *idxd)
+{
+ iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
+ iowrite32(0, OVFSTATUS_REG(idxd));
+ iowrite32(0, PERFFRZ_REG(idxd));
+}
+
+static inline void perfmon_reset_counters(struct idxd_device *idxd)
+{
+ iowrite32(CNTR_RESET, PERFRST_REG(idxd));
+}
+
+static inline void perfmon_reset(struct idxd_device *idxd)
+{
+ perfmon_reset_config(idxd);
+ perfmon_reset_counters(idxd);
+}
+
+static void perfmon_pmu_event_start(struct perf_event *event, int mode)
+{
+ u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0;
+ u64 cntr_cfg, cntrdata, event_enc, event_cat = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ union filter_cfg flt_cfg;
+ union event_cfg event_cfg;
+ struct idxd_device *idxd;
+ int cntr;
+
+ idxd = event_to_idxd(event);
+
+ event->hw.idx = hwc->idx;
+ cntr = hwc->idx;
+
+ /* Obtain event category and event value from user space */
+ event_cfg.val = event->attr.config;
+ flt_cfg.val = event->attr.config1;
+ event_cat = event_cfg.event_cat;
+ event_enc = event_cfg.event_enc;
+
+ /* Obtain filter configuration from user space */
+ flt_wq = flt_cfg.wq;
+ flt_tc = flt_cfg.tc;
+ flt_pg_sz = flt_cfg.pg_sz;
+ flt_xfer_sz = flt_cfg.xfer_sz;
+ flt_eng = flt_cfg.eng;
+
+ if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
+ if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
+ if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
+ if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
+ if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
+ iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));
+
+ /* Read the start value */
+ cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
+ local64_set(&event->hw.prev_count, cntrdata);
+
+ /* Set counter to event/category */
+ cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
+ cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
+ /* Set interrupt on overflow and counter enable bits */
+ cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE);
+
+ iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
+}
+
+static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct idxd_device *idxd;
+ int i, cntr = hwc->idx;
+ u64 cntr_cfg;
+
+ idxd = event_to_idxd(event);
+
+ /* remove this event from event list */
+ for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
+ if (event != idxd->idxd_pmu->event_list[i])
+ continue;
+
+ for (++i; i < idxd->idxd_pmu->n_events; i++)
+ idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
+ --idxd->idxd_pmu->n_events;
+ break;
+ }
+
+ cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
+ cntr_cfg &= ~CNTRCFG_ENABLE;
+ iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
+
+ if (mode == PERF_EF_UPDATE)
+ perfmon_pmu_event_update(event);
+
+ event->hw.idx = -1;
+ clear_bit(cntr, idxd->idxd_pmu->used_mask);
+}
+
+static void perfmon_pmu_event_del(struct perf_event *event, int mode)
+{
+ perfmon_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int perfmon_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct idxd_device *idxd = event_to_idxd(event);
+ struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx, n;
+
+ n = perfmon_collect_events(idxd_pmu, event, false);
+ if (n < 0)
+ return n;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
+
+ idx = perfmon_assign_event(idxd_pmu, event);
+ if (idx < 0)
+ return idx;
+
+ perfmon_assign_hw_event(idxd_pmu, event, idx);
+
+ if (flags & PERF_EF_START)
+ perfmon_pmu_event_start(event, 0);
+
+ idxd_pmu->n_events = n;
+
+ return 0;
+}
+
+static void enable_perfmon_pmu(struct idxd_device *idxd)
+{
+ iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
+}
+
+static void disable_perfmon_pmu(struct idxd_device *idxd)
+{
+ iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
+}
+
+static void perfmon_pmu_enable(struct pmu *pmu)
+{
+ struct idxd_device *idxd = pmu_to_idxd(pmu);
+
+ enable_perfmon_pmu(idxd);
+}
+
+static void perfmon_pmu_disable(struct pmu *pmu)
+{
+ struct idxd_device *idxd = pmu_to_idxd(pmu);
+
+ disable_perfmon_pmu(idxd);
+}
+
+static void skip_filter(int i)
+{
+ int j;
+
+ for (j = i; j < PERFMON_FILTERS_MAX; j++)
+ perfmon_format_attrs[PERFMON_FILTERS_START + j] =
+ perfmon_format_attrs[PERFMON_FILTERS_START + j + 1];
+}
+
+static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
+{
+ int i;
+
+ for (i = 0 ; i < PERFMON_FILTERS_MAX; i++) {
+ if (!test_bit(i, &idxd_pmu->supported_filters))
+ skip_filter(i);
+ }
+
+ idxd_pmu->pmu.name = idxd_pmu->name;
+ idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
+ idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
+ idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
+ idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
+ idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
+ idxd_pmu->pmu.add = perfmon_pmu_event_add;
+ idxd_pmu->pmu.del = perfmon_pmu_event_del;
+ idxd_pmu->pmu.start = perfmon_pmu_event_start;
+ idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
+ idxd_pmu->pmu.read = perfmon_pmu_event_update;
+ idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ idxd_pmu->pmu.module = THIS_MODULE;
+}
+
+void perfmon_pmu_remove(struct idxd_device *idxd)
+{
+ if (!idxd->idxd_pmu)
+ return;
+
+ cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
+ perf_pmu_unregister(&idxd->idxd_pmu->pmu);
+ kfree(idxd->idxd_pmu);
+ idxd->idxd_pmu = NULL;
+}
+
+static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct idxd_pmu *idxd_pmu;
+
+ idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
+
+ /* select the first online CPU as the designated reader */
+ if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
+ cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
+ idxd_pmu->cpu = cpu;
+ }
+
+ return 0;
+}
+
+static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct idxd_pmu *idxd_pmu;
+ unsigned int target;
+
+ idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
+
+ if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ /* migrate events if there is a valid target */
+ if (target < nr_cpu_ids)
+ cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
+ else
+ target = -1;
+
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+
+ return 0;
+}
+
+int perfmon_pmu_init(struct idxd_device *idxd)
+{
+ union idxd_perfcap perfcap;
+ struct idxd_pmu *idxd_pmu;
+ int rc = -ENODEV;
+
+ /*
+ * The cpuhp state was never set up (perfmon_init() failed),
+ * so there is nothing to do.
+ */
+ if (!cpuhp_set_up)
+ return -ENODEV;
+
+ /*
+ * A perfmon_offset of 0 means perfmon is not supported on this
+ * hardware; the counter count is validated below once PERFCAP
+ * has been read.
+ */
+ if (idxd->perfmon_offset == 0)
+ return -ENODEV;
+
+ idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
+ if (!idxd_pmu)
+ return -ENOMEM;
+
+ idxd_pmu->idxd = idxd;
+ idxd->idxd_pmu = idxd_pmu;
+
+ if (idxd->data->type == IDXD_TYPE_DSA) {
+ rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
+ if (rc < 0)
+ goto free;
+ } else if (idxd->data->type == IDXD_TYPE_IAX) {
+ rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
+ if (rc < 0)
+ goto free;
+ } else {
+ goto free;
+ }
+
+ perfmon_reset(idxd);
+
+ perfcap.bits = ioread64(PERFCAP_REG(idxd));
+
+ /*
+ * If the total number of perf counters is 0, stop further
+ * registration. This is necessary to support the driver
+ * running on a guest that has no perfmon support.
+ */
+ if (perfcap.num_perf_counter == 0)
+ goto free;
+
+ /* A counter width of 0 means it can't count */
+ if (perfcap.counter_width == 0)
+ goto free;
+
+ /* Overflow interrupt and counter freeze support must be available */
+ if (!perfcap.overflow_interrupt || !perfcap.counter_freeze)
+ goto free;
+
+ /* Number of event categories cannot be 0 */
+ if (perfcap.num_event_category == 0)
+ goto free;
+
+ /*
+ * We don't support per-counter capabilities for now.
+ */
+ if (perfcap.cap_per_counter)
+ goto free;
+
+ idxd_pmu->n_event_categories = perfcap.num_event_category;
+ idxd_pmu->supported_event_categories = perfcap.global_event_category;
+ idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter;
+
+ /* Check filter capability; if 0, filters are not supported */
+ idxd_pmu->supported_filters = perfcap.filter;
+ if (perfcap.filter)
+ idxd_pmu->n_filters = hweight8(perfcap.filter);
+
+ /* Store the total number of counters and the counter width */
+ idxd_pmu->n_counters = perfcap.num_perf_counter;
+ idxd_pmu->counter_width = perfcap.counter_width;
+
+ idxd_pmu_init(idxd_pmu);
+
+ rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
+ if (rc)
+ goto free;
+
+ rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
+ if (rc) {
+ perf_pmu_unregister(&idxd->idxd_pmu->pmu);
+ goto free;
+ }
+out:
+ return rc;
+free:
+ kfree(idxd_pmu);
+ idxd->idxd_pmu = NULL;
+
+ goto out;
+}
+
+void __init perfmon_init(void)
+{
+ int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "driver/dma/idxd/perf:online",
+ perf_event_cpu_online,
+ perf_event_cpu_offline);
+ if (WARN_ON(rc < 0))
+ return;
+
+ cpuhp_slot = rc;
+ cpuhp_set_up = true;
+}
+
+void __exit perfmon_exit(void)
+{
+ if (cpuhp_set_up)
+ cpuhp_remove_multi_state(cpuhp_slot);
+}
diff --git a/drivers/dma/idxd/perfmon.h b/drivers/dma/idxd/perfmon.h
new file mode 100644
index 000000000000..9a081a1bc605
--- /dev/null
+++ b/drivers/dma/idxd/perfmon.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */
+
+#ifndef _PERFMON_H_
+#define _PERFMON_H_
+
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/uuid.h>
+#include <linux/idxd.h>
+#include <linux/perf_event.h>
+#include "registers.h"
+
+static inline struct idxd_pmu *event_to_pmu(struct perf_event *event)
+{
+ struct idxd_pmu *idxd_pmu;
+ struct pmu *pmu;
+
+ pmu = event->pmu;
+ idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
+
+ return idxd_pmu;
+}
+
+static inline struct idxd_device *event_to_idxd(struct perf_event *event)
+{
+ struct idxd_pmu *idxd_pmu;
+ struct pmu *pmu;
+
+ pmu = event->pmu;
+ idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
+
+ return idxd_pmu->idxd;
+}
+
+static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu)
+{
+ struct idxd_pmu *idxd_pmu;
+
+ idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
+
+ return idxd_pmu->idxd;
+}
+
+enum dsa_perf_events {
+ DSA_PERF_EVENT_WQ = 0,
+ DSA_PERF_EVENT_ENGINE,
+ DSA_PERF_EVENT_ADDR_TRANS,
+ DSA_PERF_EVENT_OP,
+ DSA_PERF_EVENT_COMPL,
+ DSA_PERF_EVENT_MAX,
+};
+
+enum filter_enc {
+ FLT_WQ = 0,
+ FLT_TC,
+ FLT_PG_SZ,
+ FLT_XFER_SZ,
+ FLT_ENG,
+ FLT_MAX,
+};
+
+#define CONFIG_RESET 0x0000000000000001
+#define CNTR_RESET 0x0000000000000002
+#define CNTR_ENABLE 0x0000000000000001
+#define INTR_OVFL 0x0000000000000002
+
+#define COUNTER_FREEZE 0x00000000FFFFFFFF
+#define COUNTER_UNFREEZE 0x0000000000000000
+#define OVERFLOW_SIZE 32
+
+#define CNTRCFG_ENABLE BIT(0)
+#define CNTRCFG_IRQ_OVERFLOW BIT(1)
+#define CNTRCFG_CATEGORY_SHIFT 8
+#define CNTRCFG_EVENT_SHIFT 32
+
+#define PERFMON_TABLE_OFFSET(_idxd) \
+({ \
+ typeof(_idxd) __idxd = (_idxd); \
+ ((__idxd)->reg_base + (__idxd)->perfmon_offset); \
+})
+#define PERFMON_REG_OFFSET(idxd, offset) \
+ (PERFMON_TABLE_OFFSET(idxd) + (offset))
+
+#define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET))
+#define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET))
+#define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET))
+#define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET))
+
+#define FLTCFG_REG(idxd, cntr, flt) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4))
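+
+/*
+ * Each counter has a 32-byte block of filter registers, 4 bytes per
+ * filter: e.g. the FLT_TC register for counter 2 lives at
+ * perfmon_offset + IDXD_FLTCFG_OFFSET + 2 * 32 + 1 * 4.
+ */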
+
+#define CNTRCFG_REG(idxd, cntr) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8))
+#define CNTRDATA_REG(idxd, cntr) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8))
+#define CNTRCAP_REG(idxd, cntr) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8))
+
+#define EVNTCAP_REG(idxd, category) \
+ (PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8))
+
+#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format) \
+static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_idxd_##_name = \
+ __ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL)
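+
+/*
+ * For example, DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31") creates
+ * a read-only "event" attribute whose show() emits "config:4-31"; perf
+ * tooling reads these entries from the PMU's format/ sysfs directory to
+ * learn how to encode attr.config.
+ */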
+
+#endif
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 751ecb4f9f81..c970c3f025f0 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -24,8 +24,8 @@ union gen_cap_reg {
u64 overlap_copy:1;
u64 cache_control_mem:1;
u64 cache_control_cache:1;
+ u64 cmd_cap:1;
u64 rsvd:3;
- u64 int_handle_req:1;
u64 dest_readback:1;
u64 drain_readback:1;
u64 rsvd2:6;
@@ -120,7 +120,8 @@ union gencfg_reg {
union genctrl_reg {
struct {
u32 softerr_int_en:1;
- u32 rsvd:31;
+ u32 halt_int_en:1;
+ u32 rsvd:30;
};
u32 bits;
} __packed;
@@ -180,8 +181,11 @@ enum idxd_cmd {
IDXD_CMD_DRAIN_PASID,
IDXD_CMD_ABORT_PASID,
IDXD_CMD_REQUEST_INT_HANDLE,
+ IDXD_CMD_RELEASE_INT_HANDLE,
};
+#define CMD_INT_HANDLE_IMS 0x10000
+
#define IDXD_CMDSTS_OFFSET 0xa8
union cmdsts_reg {
struct {
@@ -193,6 +197,8 @@ union cmdsts_reg {
u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE 0x80000000
+#define IDXD_CMDSTS_ERR_MASK 0xff
+#define IDXD_CMDSTS_RES_SHIFT 8
enum idxd_cmdsts_err {
IDXD_CMDSTS_SUCCESS = 0,
@@ -228,6 +234,8 @@ enum idxd_cmdsts_err {
IDXD_CMDSTS_ERR_NO_HANDLE,
};
+#define IDXD_CMDCAP_OFFSET 0xb0
+
#define IDXD_SWERR_OFFSET 0xc0
#define IDXD_SWERR_VALID 0x00000001
#define IDXD_SWERR_OVERFLOW 0x00000002
@@ -378,4 +386,112 @@ union wqcfg {
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
+/* Performance monitor registers */
+#define IDXD_PERFCAP_OFFSET 0x0
+union idxd_perfcap {
+ struct {
+ u64 num_perf_counter:6;
+ u64 rsvd1:2;
+ u64 counter_width:8;
+ u64 num_event_category:4;
+ u64 global_event_category:16;
+ u64 filter:8;
+ u64 rsvd2:8;
+ u64 cap_per_counter:1;
+ u64 writeable_counter:1;
+ u64 counter_freeze:1;
+ u64 overflow_interrupt:1;
+ u64 rsvd3:8;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_EVNTCAP_OFFSET 0x80
+union idxd_evntcap {
+ struct {
+ u64 events:28;
+ u64 rsvd:36;
+ };
+ u64 bits;
+} __packed;
+
+struct idxd_event {
+ union {
+ struct {
+ u32 event_category:4;
+ u32 events:28;
+ };
+ u32 val;
+ };
+} __packed;
+
+#define IDXD_CNTRCAP_OFFSET 0x800
+struct idxd_cntrcap {
+ union {
+ struct {
+ u32 counter_width:8;
+ u32 rsvd:20;
+ u32 num_events:4;
+ };
+ u32 val;
+ };
+ struct idxd_event events[];
+} __packed;
+
+#define IDXD_PERFRST_OFFSET 0x10
+union idxd_perfrst {
+ struct {
+ u32 perfrst_config:1;
+ u32 perfrst_counter:1;
+ u32 rsvd:30;
+ };
+ u32 val;
+} __packed;
+
+#define IDXD_OVFSTATUS_OFFSET 0x30
+#define IDXD_PERFFRZ_OFFSET 0x20
+#define IDXD_CNTRCFG_OFFSET 0x100
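+/*
+ * The bit layout matches the CNTRCFG_* constants in perfmon.h: enable
+ * at bit 0, the overflow interrupt enable at bit 1, event_category at
+ * bit 8 (CNTRCFG_CATEGORY_SHIFT), and events at bit 32
+ * (CNTRCFG_EVENT_SHIFT).
+ */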
+union idxd_cntrcfg {
+ struct {
+ u64 enable:1;
+ u64 interrupt_ovf:1;
+ u64 global_freeze_ovf:1;
+ u64 rsvd1:5;
+ u64 event_category:4;
+ u64 rsvd2:20;
+ u64 events:28;
+ u64 rsvd3:4;
+ };
+ u64 val;
+} __packed;
+
+#define IDXD_FLTCFG_OFFSET 0x300
+
+#define IDXD_CNTRDATA_OFFSET 0x200
+union idxd_cntrdata {
+ struct {
+ u64 event_count_value;
+ };
+ u64 val;
+} __packed;
+
+union event_cfg {
+ struct {
+ u64 event_cat:4;
+ u64 event_enc:28;
+ };
+ u64 val;
+} __packed;
+
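+/*
+ * Mirrors the config1 layout advertised by the format attributes in
+ * perfmon.c: wq in bits 0-31, tc in 32-39, pg_sz in 40-43, xfer_sz in
+ * 44-51, and eng in 52-59.
+ */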
+union filter_cfg {
+ struct {
+ u64 wq:32;
+ u64 tc:8;
+ u64 pg_sz:4;
+ u64 xfer_sz:8;
+ u64 eng:8;
+ };
+ u64 val;
+} __packed;
+
#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index a7a61bcc17d5..19afb62abaff 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -15,18 +15,30 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
desc = wq->descs[idx];
memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
- memset(desc->completion, 0, idxd->compl_size);
+ memset(desc->completion, 0, idxd->data->compl_size);
desc->cpu = cpu;
if (device_pasid_enabled(idxd))
desc->hw->pasid = idxd->pasid;
/*
- * Descriptor completion vectors are 1-8 for MSIX. We will round
- * robin through the 8 vectors.
+ * Descriptor completion vectors are 1...N for MSIX. We will round
+ * robin through the N vectors.
*/
wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- desc->hw->int_handle = wq->vec_ptr;
+ if (!idxd->int_handles) {
+ desc->hw->int_handle = wq->vec_ptr;
+ } else {
+ desc->vector = wq->vec_ptr;
+ /*
+ * int_handles are only for descriptor completion. However, for
+ * device MSIX enumeration, vec 0 is used for misc interrupts.
+ * Therefore, even though we are rotating through 1...N for
+ * descriptor interrupts, we need to acquire the int_handles
+ * from 0..N-1.
+ */
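+ /*
+ * Example: with num_wq_irqs == 3, vec_ptr cycles through 1, 2, 3
+ * while the handle written into the descriptor comes from
+ * int_handles[0..2].
+ */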
+ desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
+ }
+
return desc;
}
@@ -79,13 +91,15 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;
- int vec = desc->hw->int_handle;
void __iomem *portal;
int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
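+ /*
+ * Take a live reference on the wq for the duration of the
+ * submission; tryget fails once the wq has begun quiescing, so a
+ * descriptor is never written to a portal that is going away.
+ */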
+ if (!percpu_ref_tryget_live(&wq->wq_active))
+ return -ENXIO;
+
portal = wq->portal;
/*
@@ -108,13 +122,25 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
return rc;
}
+ percpu_ref_put(&wq->wq_active);
+
/*
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
*/
- if (desc->hw->flags & IDXD_OP_FLAG_RCI)
- llist_add(&desc->llnode,
- &idxd->irq_entries[vec].pending_llist);
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+ int vec;
+
+ /*
+ * On the host kernel, this is the value assigned to the
+ * interrupt handle, which is the index of the MSIX vector. On
+ * a guest, the int_handle cannot be used since it indexes the
+ * device-wide IMS table; the guest's device-local index is
+ * used instead.
+ */
+ vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
+ llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
+ }
return 0;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 18bf4d148989..0460d58e3941 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
[IDXD_WQT_USER] = "user",
};
-static void idxd_conf_device_release(struct device *dev)
-{
- dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
-}
-
-static struct device_type idxd_group_device_type = {
- .name = "group",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type idxd_wq_device_type = {
- .name = "wq",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type idxd_engine_device_type = {
- .name = "engine",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type dsa_device_type = {
- .name = "dsa",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type iax_device_type = {
- .name = "iax",
- .release = idxd_conf_device_release,
-};
-
-static inline bool is_dsa_dev(struct device *dev)
-{
- return dev ? dev->type == &dsa_device_type : false;
-}
-
-static inline bool is_iax_dev(struct device *dev)
-{
- return dev ? dev->type == &iax_device_type : false;
-}
-
-static inline bool is_idxd_dev(struct device *dev)
-{
- return is_dsa_dev(dev) || is_iax_dev(dev);
-}
-
-static inline bool is_idxd_wq_dev(struct device *dev)
-{
- return dev ? dev->type == &idxd_wq_device_type : false;
-}
-
-static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
-{
- if (wq->type == IDXD_WQT_KERNEL &&
- strcmp(wq->name, "dmaengine") == 0)
- return true;
- return false;
-}
-
-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
-{
- return wq->type == IDXD_WQT_USER;
-}
-
static int idxd_config_bus_match(struct device *dev,
struct device_driver *drv)
{
@@ -110,9 +47,131 @@ static int idxd_config_bus_match(struct device *dev,
return matched;
}
-static int idxd_config_bus_probe(struct device *dev)
+static int enable_wq(struct idxd_wq *wq)
{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ unsigned long flags;
int rc;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Enabling while device not enabled.\n");
+ return -EPERM;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+ return -EBUSY;
+ }
+
+ if (!wq->group) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ not attached to group.\n");
+ return -EINVAL;
+ }
+
+ if (strlen(wq->name) == 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ name not set.\n");
+ return -EINVAL;
+ }
+
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ dev_warn(dev, "PASID not enabled and shared WQ.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -ENXIO;
+ }
+ /*
+ * Shared wq with the threshold set to 0 means the user
+ * did not set the threshold or transitioned from a
+ * dedicated wq but did not set threshold. A value
+ * of 0 would effectively disable the shared wq. The
+ * driver does not allow a value of 0 to be set for
+ * threshold via sysfs.
+ */
+ if (wq->threshold == 0) {
+ dev_warn(dev, "Shared WQ and threshold 0.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -EINVAL;
+ }
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ resource alloc failed\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ dev_warn(dev, "IDXD wq disable failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+
+ wq->client_count = 0;
+
+ if (wq->type == IDXD_WQT_KERNEL) {
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ if (is_idxd_wq_dmaengine(wq)) {
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "DMA channel register failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ } else if (is_idxd_wq_cdev(wq)) {
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Cdev creation failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ mutex_unlock(&wq->wq_lock);
+ dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+ return 0;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ int rc = 0;
unsigned long flags;
dev_dbg(dev, "%s called\n", __func__);
@@ -130,7 +189,8 @@ static int idxd_config_bus_probe(struct device *dev)
/* Perform IDXD configuration and enabling */
spin_lock_irqsave(&idxd->dev_lock, flags);
- rc = idxd_device_config(idxd);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
if (rc < 0) {
module_put(THIS_MODULE);
@@ -157,115 +217,8 @@ static int idxd_config_bus_probe(struct device *dev)
return 0;
} else if (is_idxd_wq_dev(dev)) {
struct idxd_wq *wq = confdev_to_wq(dev);
- struct idxd_device *idxd = wq->idxd;
-
- mutex_lock(&wq->wq_lock);
-
- if (idxd->state != IDXD_DEV_ENABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Enabling while device not enabled.\n");
- return -EPERM;
- }
-
- if (wq->state != IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d already enabled.\n", wq->id);
- return -EBUSY;
- }
-
- if (!wq->group) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ not attached to group.\n");
- return -EINVAL;
- }
-
- if (strlen(wq->name) == 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ name not set.\n");
- return -EINVAL;
- }
-
- /* Shared WQ checks */
- if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
- dev_warn(dev,
- "PASID not enabled and shared WQ.\n");
- mutex_unlock(&wq->wq_lock);
- return -ENXIO;
- }
- /*
- * Shared wq with the threshold set to 0 means the user
- * did not set the threshold or transitioned from a
- * dedicated wq but did not set threshold. A value
- * of 0 would effectively disable the shared wq. The
- * driver does not allow a value of 0 to be set for
- * threshold via sysfs.
- */
- if (wq->threshold == 0) {
- dev_warn(dev,
- "Shared WQ and threshold 0.\n");
- mutex_unlock(&wq->wq_lock);
- return -EINVAL;
- }
- }
-
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ resource alloc failed\n");
- return rc;
- }
-
- spin_lock_irqsave(&idxd->dev_lock, flags);
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Writing WQ %d config failed: %d\n",
- wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_enable(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d enabling failed: %d\n",
- wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_map_portal(wq);
- if (rc < 0) {
- dev_warn(dev, "wq portal mapping failed: %d\n", rc);
- rc = idxd_wq_disable(wq);
- if (rc < 0)
- dev_warn(dev, "IDXD wq disable failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
-
- wq->client_count = 0;
-
- dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
- if (is_idxd_wq_dmaengine(wq)) {
- rc = idxd_register_dma_channel(wq);
- if (rc < 0) {
- dev_dbg(dev, "DMA channel register failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- } else if (is_idxd_wq_cdev(wq)) {
- rc = idxd_wq_add_cdev(wq);
- if (rc < 0) {
- dev_dbg(dev, "Cdev creation failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- mutex_unlock(&wq->wq_lock);
- return 0;
+ return enable_wq(wq);
}
return -ENODEV;
@@ -283,6 +236,9 @@ static void disable_wq(struct idxd_wq *wq)
return;
}
+ if (wq->type == IDXD_WQT_KERNEL)
+ idxd_wq_quiesce(wq);
+
if (is_idxd_wq_dmaengine(wq))
idxd_unregister_dma_channel(wq);
else if (is_idxd_wq_cdev(wq))
@@ -322,7 +278,7 @@ static int idxd_config_bus_remove(struct device *dev)
dev_dbg(dev, "%s removing dev %s\n", __func__,
dev_name(&idxd->conf_dev));
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_DISABLED)
continue;
@@ -333,12 +289,14 @@ static int idxd_config_bus_remove(struct device *dev)
idxd_unregister_dma_device(idxd);
rc = idxd_device_disable(idxd);
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
- mutex_lock(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
- mutex_unlock(&wq->wq_lock);
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_disable_cleanup(wq);
+ mutex_unlock(&wq->wq_lock);
+ }
}
module_put(THIS_MODULE);
if (rc < 0)
@@ -364,19 +322,6 @@ struct bus_type dsa_bus_type = {
.shutdown = idxd_config_bus_shutdown,
};
-struct bus_type iax_bus_type = {
- .name = "iax",
- .match = idxd_config_bus_match,
- .probe = idxd_config_bus_probe,
- .remove = idxd_config_bus_remove,
- .shutdown = idxd_config_bus_shutdown,
-};
-
-static struct bus_type *idxd_bus_types[] = {
- &dsa_bus_type,
- &iax_bus_type
-};
-
static struct idxd_device_driver dsa_drv = {
.drv = {
.name = "dsa",
@@ -386,60 +331,15 @@ static struct idxd_device_driver dsa_drv = {
},
};
-static struct idxd_device_driver iax_drv = {
- .drv = {
- .name = "iax",
- .bus = &iax_bus_type,
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
-};
-
-static struct idxd_device_driver *idxd_drvs[] = {
- &dsa_drv,
- &iax_drv
-};
-
-struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
-{
- return idxd_bus_types[idxd->type];
-}
-
-static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
-{
- if (idxd->type == IDXD_TYPE_DSA)
- return &dsa_device_type;
- else if (idxd->type == IDXD_TYPE_IAX)
- return &iax_device_type;
- else
- return NULL;
-}
-
/* IDXD generic driver setup */
int idxd_register_driver(void)
{
- int i, rc;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++) {
- rc = driver_register(&idxd_drvs[i]->drv);
- if (rc < 0)
- goto drv_fail;
- }
-
- return 0;
-
-drv_fail:
- while (--i >= 0)
- driver_unregister(&idxd_drvs[i]->drv);
- return rc;
+ return driver_register(&dsa_drv.drv);
}
void idxd_unregister_driver(void)
{
- int i;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++)
- driver_unregister(&idxd_drvs[i]->drv);
+ driver_unregister(&dsa_drv.drv);
}
/* IDXD engine attributes */
@@ -450,9 +350,9 @@ static ssize_t engine_group_id_show(struct device *dev,
container_of(dev, struct idxd_engine, conf_dev);
if (engine->group)
- return sprintf(buf, "%d\n", engine->group->id);
+ return sysfs_emit(buf, "%d\n", engine->group->id);
else
- return sprintf(buf, "%d\n", -1);
+ return sysfs_emit(buf, "%d\n", -1);
}
static ssize_t engine_group_id_store(struct device *dev,
@@ -488,7 +388,7 @@ static ssize_t engine_group_id_store(struct device *dev,
if (prevg)
prevg->num_engines--;
- engine->group = &idxd->groups[id];
+ engine->group = idxd->groups[id];
engine->group->num_engines++;
return count;
@@ -512,6 +412,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
NULL,
};
+static void idxd_conf_engine_release(struct device *dev)
+{
+ struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
+
+ kfree(engine);
+}
+
+struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_engine_release,
+ .groups = idxd_engine_attribute_groups,
+};
+
/* Group attributes */
static void idxd_set_free_tokens(struct idxd_device *idxd)
@@ -519,7 +432,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
int i, tokens;
for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
- struct idxd_group *g = &idxd->groups[i];
+ struct idxd_group *g = idxd->groups[i];
tokens += g->tokens_reserved;
}
@@ -534,7 +447,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%u\n", group->tokens_reserved);
+ return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}
static ssize_t group_tokens_reserved_store(struct device *dev,
@@ -551,7 +464,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (rc < 0)
return -EINVAL;
- if (idxd->type == IDXD_TYPE_IAX)
+ if (idxd->data->type == IDXD_TYPE_IAX)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -582,7 +495,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%u\n", group->tokens_allowed);
+ return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}
static ssize_t group_tokens_allowed_store(struct device *dev,
@@ -599,7 +512,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
if (rc < 0)
return -EINVAL;
- if (idxd->type == IDXD_TYPE_IAX)
+ if (idxd->data->type == IDXD_TYPE_IAX)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -627,7 +540,7 @@ static ssize_t group_use_token_limit_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%u\n", group->use_token_limit);
+ return sysfs_emit(buf, "%u\n", group->use_token_limit);
}
static ssize_t group_use_token_limit_store(struct device *dev,
@@ -644,7 +557,7 @@ static ssize_t group_use_token_limit_store(struct device *dev,
if (rc < 0)
return -EINVAL;
- if (idxd->type == IDXD_TYPE_IAX)
+ if (idxd->data->type == IDXD_TYPE_IAX)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -670,22 +583,22 @@ static ssize_t group_engines_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
int i, rc = 0;
- char *tmp = buf;
struct idxd_device *idxd = group->idxd;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
if (!engine->group)
continue;
if (engine->group->id == group->id)
- rc += sprintf(tmp + rc, "engine%d.%d ",
- idxd->id, engine->id);
+ rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
}
+ if (!rc)
+ return 0;
rc--;
- rc += sprintf(tmp + rc, "\n");
+ rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
@@ -699,22 +612,22 @@ static ssize_t group_work_queues_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
int i, rc = 0;
- char *tmp = buf;
struct idxd_device *idxd = group->idxd;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (!wq->group)
continue;
if (wq->group->id == group->id)
- rc += sprintf(tmp + rc, "wq%d.%d ",
- idxd->id, wq->id);
+ rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
}
+ if (!rc)
+ return 0;
rc--;
- rc += sprintf(tmp + rc, "\n");
+ rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
@@ -729,7 +642,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%d\n", group->tc_a);
+ return sysfs_emit(buf, "%d\n", group->tc_a);
}
static ssize_t group_traffic_class_a_store(struct device *dev,
@@ -770,7 +683,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev,
struct idxd_group *group =
container_of(dev, struct idxd_group, conf_dev);
- return sprintf(buf, "%d\n", group->tc_b);
+ return sysfs_emit(buf, "%d\n", group->tc_b);
}
static ssize_t group_traffic_class_b_store(struct device *dev,
@@ -824,13 +737,26 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
NULL,
};
+static void idxd_conf_group_release(struct device *dev)
+{
+ struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
+
+ kfree(group);
+}
+
+struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_group_release,
+ .groups = idxd_group_attribute_groups,
+};
+
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%d\n", wq->client_count);
+ return sysfs_emit(buf, "%d\n", wq->client_count);
}
static struct device_attribute dev_attr_wq_clients =
@@ -843,12 +769,12 @@ static ssize_t wq_state_show(struct device *dev,
switch (wq->state) {
case IDXD_WQ_DISABLED:
- return sprintf(buf, "disabled\n");
+ return sysfs_emit(buf, "disabled\n");
case IDXD_WQ_ENABLED:
- return sprintf(buf, "enabled\n");
+ return sysfs_emit(buf, "enabled\n");
}
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
}
static struct device_attribute dev_attr_wq_state =
@@ -860,9 +786,9 @@ static ssize_t wq_group_id_show(struct device *dev,
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
if (wq->group)
- return sprintf(buf, "%u\n", wq->group->id);
+ return sysfs_emit(buf, "%u\n", wq->group->id);
else
- return sprintf(buf, "-1\n");
+ return sysfs_emit(buf, "-1\n");
}
static ssize_t wq_group_id_store(struct device *dev,
@@ -896,7 +822,7 @@ static ssize_t wq_group_id_store(struct device *dev,
return count;
}
- group = &idxd->groups[id];
+ group = idxd->groups[id];
prevg = wq->group;
if (prevg)
@@ -914,8 +840,7 @@ static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%s\n",
- wq_dedicated(wq) ? "dedicated" : "shared");
+ return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}
static ssize_t wq_mode_store(struct device *dev,
@@ -951,7 +876,7 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->size);
+ return sysfs_emit(buf, "%u\n", wq->size);
}
static int total_claimed_wq_size(struct idxd_device *idxd)
@@ -960,7 +885,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
int wq_size = 0;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
wq_size += wq->size;
}
@@ -1002,7 +927,7 @@ static ssize_t wq_priority_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->priority);
+ return sysfs_emit(buf, "%u\n", wq->priority);
}
static ssize_t wq_priority_store(struct device *dev,
@@ -1039,8 +964,7 @@ static ssize_t wq_block_on_fault_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n",
- test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
+ return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
static ssize_t wq_block_on_fault_store(struct device *dev,
@@ -1079,7 +1003,7 @@ static ssize_t wq_threshold_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->threshold);
+ return sysfs_emit(buf, "%u\n", wq->threshold);
}
static ssize_t wq_threshold_store(struct device *dev,
@@ -1122,15 +1046,12 @@ static ssize_t wq_type_show(struct device *dev,
switch (wq->type) {
case IDXD_WQT_KERNEL:
- return sprintf(buf, "%s\n",
- idxd_wq_type_names[IDXD_WQT_KERNEL]);
+ return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
case IDXD_WQT_USER:
- return sprintf(buf, "%s\n",
- idxd_wq_type_names[IDXD_WQT_USER]);
+ return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
case IDXD_WQT_NONE:
default:
- return sprintf(buf, "%s\n",
- idxd_wq_type_names[IDXD_WQT_NONE]);
+ return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
}
return -EINVAL;
@@ -1171,7 +1092,7 @@ static ssize_t wq_name_show(struct device *dev,
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%s\n", wq->name);
+ return sysfs_emit(buf, "%s\n", wq->name);
}
static ssize_t wq_name_store(struct device *dev,
@@ -1206,8 +1127,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ int minor = -1;
- return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+ mutex_lock(&wq->wq_lock);
+ if (wq->idxd_cdev)
+ minor = wq->idxd_cdev->minor;
+ mutex_unlock(&wq->wq_lock);
+
+ if (minor == -1)
+ return -ENXIO;
+ return sysfs_emit(buf, "%d\n", minor);
}
static struct device_attribute dev_attr_wq_cdev_minor =
@@ -1233,7 +1162,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
+ return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
@@ -1267,7 +1196,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->max_batch_size);
+ return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
@@ -1300,7 +1229,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
- return sprintf(buf, "%u\n", wq->ats_dis);
+ return sysfs_emit(buf, "%u\n", wq->ats_dis);
}
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
@@ -1356,6 +1285,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
NULL,
};
+static void idxd_conf_wq_release(struct device *dev)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ kfree(wq->wqcfg);
+ kfree(wq);
+}
+
+struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_wq_release,
+ .groups = idxd_wq_attribute_groups,
+};
+
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1363,7 +1306,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%#x\n", idxd->hw.version);
+ return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);
@@ -1374,7 +1317,7 @@ static ssize_t max_work_queues_size_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_wq_size);
+ return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);
@@ -1384,7 +1327,7 @@ static ssize_t max_groups_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_groups);
+ return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);
@@ -1394,7 +1337,7 @@ static ssize_t max_work_queues_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_wqs);
+ return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);
@@ -1404,7 +1347,7 @@ static ssize_t max_engines_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_engines);
+ return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);
@@ -1414,7 +1357,7 @@ static ssize_t numa_node_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+ return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);
@@ -1424,7 +1367,7 @@ static ssize_t max_batch_size_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_batch_size);
+ return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);
@@ -1435,7 +1378,7 @@ static ssize_t max_transfer_size_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+ return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);
@@ -1461,7 +1404,7 @@ static ssize_t gen_cap_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
+ return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);
@@ -1471,8 +1414,7 @@ static ssize_t configurable_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n",
- test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+ return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
@@ -1486,13 +1428,13 @@ static ssize_t clients_show(struct device *dev,
spin_lock_irqsave(&idxd->dev_lock, flags);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
count += wq->client_count;
}
spin_unlock_irqrestore(&idxd->dev_lock, flags);
- return sprintf(buf, "%d\n", count);
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);
@@ -1502,7 +1444,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
+ return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
@@ -1515,14 +1457,14 @@ static ssize_t state_show(struct device *dev,
switch (idxd->state) {
case IDXD_DEV_DISABLED:
case IDXD_DEV_CONF_READY:
- return sprintf(buf, "disabled\n");
+ return sysfs_emit(buf, "disabled\n");
case IDXD_DEV_ENABLED:
- return sprintf(buf, "enabled\n");
+ return sysfs_emit(buf, "enabled\n");
case IDXD_DEV_HALTED:
- return sprintf(buf, "halted\n");
+ return sysfs_emit(buf, "halted\n");
}
- return sprintf(buf, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);
@@ -1536,10 +1478,10 @@ static ssize_t errors_show(struct device *dev,
spin_lock_irqsave(&idxd->dev_lock, flags);
for (i = 0; i < 4; i++)
- out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+ out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
out--;
- out += sprintf(buf + out, "\n");
+ out += sysfs_emit_at(buf, out, "\n");
return out;
}
static DEVICE_ATTR_RO(errors);
@@ -1550,7 +1492,7 @@ static ssize_t max_tokens_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->max_tokens);
+ return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);
@@ -1560,7 +1502,7 @@ static ssize_t token_limit_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->token_limit);
+ return sysfs_emit(buf, "%u\n", idxd->token_limit);
}
static ssize_t token_limit_store(struct device *dev,
@@ -1599,7 +1541,7 @@ static ssize_t cdev_major_show(struct device *dev,
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%u\n", idxd->major);
+ return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);
@@ -1608,7 +1550,7 @@ static ssize_t cmd_status_show(struct device *dev,
{
struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
- return sprintf(buf, "%#x\n", idxd->cmd_status);
+ return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);
@@ -1644,183 +1586,161 @@ static const struct attribute_group *idxd_attribute_groups[] = {
NULL,
};
-static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+static void idxd_conf_device_release(struct device *dev)
{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
+ struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+
+ kfree(idxd->groups);
+ kfree(idxd->wqs);
+ kfree(idxd->engines);
+ kfree(idxd->irq_entries);
+ kfree(idxd->int_handles);
+ ida_free(&idxd_ida, idxd->id);
+ kfree(idxd);
+}
+
+struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+ .groups = idxd_attribute_groups,
+};
+
+struct device_type iax_device_type = {
+ .name = "iax",
+ .release = idxd_conf_device_release,
+ .groups = idxd_attribute_groups,
+};
+
+static int idxd_register_engine_devices(struct idxd_device *idxd)
+{
+ int i, j, rc;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
-
- engine->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&engine->conf_dev, "engine%d.%d",
- idxd->id, engine->id);
- engine->conf_dev.bus = idxd_get_bus_type(idxd);
- engine->conf_dev.groups = idxd_engine_attribute_groups;
- engine->conf_dev.type = &idxd_engine_device_type;
- dev_dbg(dev, "Engine device register: %s\n",
- dev_name(&engine->conf_dev));
- rc = device_register(&engine->conf_dev);
- if (rc < 0) {
- put_device(&engine->conf_dev);
+ struct idxd_engine *engine = idxd->engines[i];
+
+ rc = device_add(&engine->conf_dev);
+ if (rc < 0)
goto cleanup;
- }
}
return 0;
cleanup:
- while (i--) {
- struct idxd_engine *engine = &idxd->engines[i];
+ j = i - 1;
+ for (; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
- device_unregister(&engine->conf_dev);
- }
+ while (j--)
+ device_unregister(&idxd->engines[j]->conf_dev);
return rc;
}
-static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+static int idxd_register_group_devices(struct idxd_device *idxd)
{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
+ int i, j, rc;
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
-
- group->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&group->conf_dev, "group%d.%d",
- idxd->id, group->id);
- group->conf_dev.bus = idxd_get_bus_type(idxd);
- group->conf_dev.groups = idxd_group_attribute_groups;
- group->conf_dev.type = &idxd_group_device_type;
- dev_dbg(dev, "Group device register: %s\n",
- dev_name(&group->conf_dev));
- rc = device_register(&group->conf_dev);
- if (rc < 0) {
- put_device(&group->conf_dev);
+ struct idxd_group *group = idxd->groups[i];
+
+ rc = device_add(&group->conf_dev);
+ if (rc < 0)
goto cleanup;
- }
}
return 0;
cleanup:
- while (i--) {
- struct idxd_group *group = &idxd->groups[i];
+ j = i - 1;
+ for (; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
- device_unregister(&group->conf_dev);
- }
+ while (j--)
+ device_unregister(&idxd->groups[j]->conf_dev);
return rc;
}
-static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+static int idxd_register_wq_devices(struct idxd_device *idxd)
{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
+ int i, rc, j;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
-
- wq->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
- wq->conf_dev.bus = idxd_get_bus_type(idxd);
- wq->conf_dev.groups = idxd_wq_attribute_groups;
- wq->conf_dev.type = &idxd_wq_device_type;
- dev_dbg(dev, "WQ device register: %s\n",
- dev_name(&wq->conf_dev));
- rc = device_register(&wq->conf_dev);
- if (rc < 0) {
- put_device(&wq->conf_dev);
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ rc = device_add(&wq->conf_dev);
+ if (rc < 0)
goto cleanup;
- }
}
return 0;
cleanup:
- while (i--) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ j = i - 1;
+ for (; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
- device_unregister(&wq->conf_dev);
- }
+ while (j--)
+ device_unregister(&idxd->wqs[j]->conf_dev);
return rc;
}
-static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+int idxd_register_devices(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int rc;
- char devname[IDXD_NAME_SIZE];
+ int rc, i;
- sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
- idxd->conf_dev.parent = dev;
- dev_set_name(&idxd->conf_dev, "%s", devname);
- idxd->conf_dev.bus = idxd_get_bus_type(idxd);
- idxd->conf_dev.groups = idxd_attribute_groups;
- idxd->conf_dev.type = idxd_get_device_type(idxd);
-
- dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
- rc = device_register(&idxd->conf_dev);
- if (rc < 0) {
- put_device(&idxd->conf_dev);
- return rc;
- }
-
- return 0;
-}
-
-int idxd_setup_sysfs(struct idxd_device *idxd)
-{
- struct device *dev = &idxd->pdev->dev;
- int rc;
-
- rc = idxd_setup_device_sysfs(idxd);
- if (rc < 0) {
- dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+ rc = device_add(&idxd->conf_dev);
+ if (rc < 0)
return rc;
- }
- rc = idxd_setup_wq_sysfs(idxd);
+ rc = idxd_register_wq_devices(idxd);
if (rc < 0) {
- /* unregister conf dev */
- dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
- return rc;
+ dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
+ goto err_wq;
}
- rc = idxd_setup_group_sysfs(idxd);
+ rc = idxd_register_engine_devices(idxd);
if (rc < 0) {
- /* unregister conf dev */
- dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
- return rc;
+ dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
+ goto err_engine;
}
- rc = idxd_setup_engine_sysfs(idxd);
+ rc = idxd_register_group_devices(idxd);
if (rc < 0) {
- /* unregister conf dev */
- dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
- return rc;
+ dev_dbg(dev, "Group device registering failed: %d\n", rc);
+ goto err_group;
}
return 0;
+
+ err_group:
+ for (i = 0; i < idxd->max_engines; i++)
+ device_unregister(&idxd->engines[i]->conf_dev);
+ err_engine:
+ for (i = 0; i < idxd->max_wqs; i++)
+ device_unregister(&idxd->wqs[i]->conf_dev);
+ err_wq:
+ device_del(&idxd->conf_dev);
+ return rc;
}
-void idxd_cleanup_sysfs(struct idxd_device *idxd)
+void idxd_unregister_devices(struct idxd_device *idxd)
{
int i;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
device_unregister(&wq->conf_dev);
}
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
device_unregister(&engine->conf_dev);
}
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
device_unregister(&group->conf_dev);
}
@@ -1830,26 +1750,10 @@ void idxd_cleanup_sysfs(struct idxd_device *idxd)
int idxd_register_bus_type(void)
{
- int i, rc;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++) {
- rc = bus_register(idxd_bus_types[i]);
- if (rc < 0)
- goto bus_err;
- }
-
- return 0;
-
-bus_err:
- while (--i >= 0)
- bus_unregister(idxd_bus_types[i]);
- return rc;
+ return bus_register(&dsa_bus_type);
}
void idxd_unregister_bus_type(void)
{
- int i;
-
- for (i = 0; i < IDXD_TYPE_MAX; i++)
- bus_unregister(idxd_bus_types[i]);
+ bus_unregister(&dsa_bus_type);
}
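
The idxd conversions above swap sprintf() for sysfs_emit()/sysfs_emit_at(), which bound sysfs output to PAGE_SIZE instead of trusting the format string. A minimal sketch of the resulting show() pattern, with a hypothetical foo attribute and driver data:

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo_device {			/* hypothetical driver data */
	unsigned int value;
};

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct foo_device *fd = dev_get_drvdata(dev);

	/* sysfs_emit() WARNs unless buf is page-aligned and caps output at PAGE_SIZE */
	return sysfs_emit(buf, "%u\n", fd->value);
}
static DEVICE_ATTR_RO(foo);

For multi-value attributes such as errors_show() above, sysfs_emit_at(buf, at, ...) appends at an offset while keeping the same bounds check.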
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index d0b2e601e3e5..ecdaada95120 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 - 2015 Linaro Ltd.
- * Copyright (c) 2013 Hisilicon Limited.
+ * Copyright (c) 2013 HiSilicon Limited.
*/
#include <linux/sched.h>
#include <linux/device.h>
@@ -1039,6 +1039,6 @@ static struct platform_driver k3_pdma_driver = {
module_platform_driver(k3_pdma_driver);
-MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 57f5ee4235c7..43ac3ab23d4c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2281,6 +2281,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sdm845-gpi-dma" },
+ { .compatible = "qcom,sm8150-gpi-dma" },
{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 6c0f9eb8ecc6..23d64489d25f 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -90,12 +90,6 @@ static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
return container_of(dmach, struct hidma_chan, chan);
}
-static inline
-struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
-{
- return container_of(t, struct hidma_desc, desc);
-}
-
static void hidma_free(struct hidma_dev *dmadev)
{
INIT_LIST_HEAD(&dmadev->ddev.channels);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 3aded7861fef..75c0b8e904e5 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2453,6 +2453,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
return 0;
}
+static void xilinx_dma_synchronize(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
/**
* xilinx_dma_channel_set_config - Configure VDMA channel
* Run-time configuration for Axi VDMA, supports:
@@ -3074,6 +3081,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_free_chan_resources =
xilinx_dma_free_chan_resources;
xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+ xdev->common.device_synchronize = xilinx_dma_synchronize;
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
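
xilinx_dma now implements device_synchronize, reached through dmaengine_synchronize(), so consumers can wait out the completion tasklet before freeing callback state. A hedged sketch of the teardown order this enables, assuming a channel obtained via dma_request_chan():

#include <linux/dmaengine.h>

/* Sketch: stop a channel and wait until no completion callback can still run. */
static void example_channel_shutdown(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);	/* lands in xilinx_dma_terminate_all() */
	dmaengine_synchronize(chan);		/* lands in xilinx_dma_synchronize(), i.e. tasklet_kill() */
	/* callback resources may be freed safely from here on */
}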
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index d51ca0428bb8..f191a1f901ac 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -166,6 +166,7 @@ static int __init dmi_checksum(const u8 *buf, u8 len)
static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
+EXPORT_SYMBOL_GPL(dmi_available);
/*
* Save a DMI string
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 64344e84bd63..f5bd0dcbbb1c 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -23,6 +23,7 @@
#include <asm/cpuidle.h>
#include <asm/cputype.h>
+#include <asm/hypervisor.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
@@ -501,6 +502,7 @@ static int __init psci_probe(void)
psci_init_cpu_suspend();
psci_init_system_suspend();
psci_init_system_reset2();
+ kvm_init_hyp_services();
}
return 0;
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
index 72ab84042832..40d19144a860 100644
--- a/drivers/firmware/smccc/Makefile
+++ b/drivers/firmware/smccc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
-obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
+obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o kvm_guest.o
obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
new file mode 100644
index 000000000000..2d3e866decaa
--- /dev/null
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "smccc: KVM: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/hypervisor.h>
+
+static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
+
+void __init kvm_init_hyp_services(void)
+{
+ struct arm_smccc_res res;
+ u32 val[4];
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
+ res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
+ res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
+ res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ return;
+
+ memset(&res, 0, sizeof(res));
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res);
+
+ val[0] = lower_32_bits(res.a0);
+ val[1] = lower_32_bits(res.a1);
+ val[2] = lower_32_bits(res.a2);
+ val[3] = lower_32_bits(res.a3);
+
+ bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS);
+
+ pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n",
+ res.a3, res.a2, res.a1, res.a0);
+}
+
+bool kvm_arm_hyp_service_available(u32 func_id)
+{
+ if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS)
+ return false;
+
+ return test_bit(func_id, __kvm_arm_hyp_services);
+}
+EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
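
Guest code consults the bitmap that kvm_init_hyp_services() fills once at boot via kvm_arm_hyp_service_available(). A minimal sketch, assuming ARM_SMCCC_KVM_FUNC_FEATURES as the queried function ID:

#include <linux/arm-smccc.h>

/* Sketch: bail out early when the hypervisor does not advertise the service. */
static bool example_hyp_service_usable(void)
{
	return kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_FEATURES);
}

Out-of-range IDs simply return false, so callers need no separate bounds check.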
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index d52bfc5ed5e4..028f81d702cc 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -8,6 +8,7 @@
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
#include <asm/archrandom.h>
static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 83082e2f2e44..15b138326ecc 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -812,6 +812,120 @@ int zynqmp_pm_fpga_get_status(u32 *value)
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
+ * zynqmp_pm_pinctrl_request - Request pin from firmware
+ * @pin: Pin number to request
+ *
+ * This function requests the pin from the firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
+
+/**
+ * zynqmp_pm_pinctrl_release - Inform firmware that pin control is released
+ * @pin: Pin number to release
+ *
+ * This function releases the pin back to the firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release);
+
+/**
+ * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
+ * @pin: Pin number
+ * @id: Buffer to store function ID
+ *
+ * This function provides the function currently set for the given pin.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!id)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
+ 0, 0, ret_payload);
+ *id = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
+
+/**
+ * zynqmp_pm_pinctrl_set_function - Set requested function for the pin
+ * @pin: Pin number
+ * @id: Function ID to set
+ *
+ * This function sets the requested function for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function);
+
+/**
+ * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to get
+ * @value: Buffer to store parameter value
+ *
+ * This function gets the requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
+ 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_config);
+
+/**
+ * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to set
+ * @value: Parameter value to set
+ *
+ * This function sets the requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
+ param, value, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config);
+
+/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d3b3de514f6e..1dd0ec6727fd 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -321,9 +321,8 @@ config GPIO_HLWD
config GPIO_ICH
tristate "Intel ICH GPIO"
- depends on PCI && X86
- select MFD_CORE
- select LPC_ICH
+ depends on X86
+ depends on LPC_ICH
help
Say yes here to support the GPIO functionality of a number of Intel
ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
@@ -502,6 +501,19 @@ config GPIO_RDA
help
Say Y here to support RDA Micro GPIO controller.
+config GPIO_REALTEK_OTTO
+ tristate "Realtek Otto GPIO support"
+ depends on MACH_REALTEK_RTL
+ default MACH_REALTEK_RTL
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ The GPIO controller on the Otto MIPS platform supports up to two
+ banks of 32 GPIOs, with edge-triggered interrupts. The 32 GPIOs
+ are grouped in four 8-bit wide ports.
+
+ When built as a module, the module will be called realtek_otto_gpio.
+
config GPIO_REG
bool
help
@@ -847,9 +859,9 @@ config GPIO_IT87
config GPIO_SCH
tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
- depends on (X86 || COMPILE_TEST) && PCI
- select MFD_CORE
- select LPC_SCH
+ depends on (X86 || COMPILE_TEST) && ACPI
+ depends on LPC_SCH
+ select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO interface on Intel Poulsbo SCH,
Intel Tunnel Creek processor, Intel Centerton processor or
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4c12f31db31f..d7c81e1611a4 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -125,6 +125,7 @@ obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
obj-$(CONFIG_GPIO_RDA) += gpio-rda.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+obj-$(CONFIG_GPIO_REALTEK_OTTO) += gpio-realtek-otto.o
obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 7a9021c4fa48..71c0bea34d7b 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -49,15 +49,15 @@ struct dio48e_gpio {
unsigned char out_state[6];
unsigned char control[2];
raw_spinlock_t lock;
- unsigned base;
+ unsigned int base;
unsigned char irq_mask;
};
-static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
if (dio48egpio->io_state[port] & mask)
return GPIO_LINE_DIRECTION_IN;
@@ -65,14 +65,14 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned io_port = offset / 8;
+ const unsigned int io_port = offset / 8;
const unsigned int control_port = io_port / 3;
- const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
+ const unsigned int control_addr = dio48egpio->base + 3 + control_port * 4;
unsigned long flags;
- unsigned control;
+ unsigned int control;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -104,17 +104,17 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned io_port = offset / 8;
+ const unsigned int io_port = offset / 8;
const unsigned int control_port = io_port / 3;
- const unsigned mask = BIT(offset % 8);
- const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
- const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int control_addr = dio48egpio->base + 3 + control_port * 4;
+ const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
unsigned long flags;
- unsigned control;
+ unsigned int control;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -154,14 +154,14 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- const unsigned in_port = (port > 2) ? port + 1 : port;
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int in_port = (port > 2) ? port + 1 : port;
unsigned long flags;
- unsigned port_state;
+ unsigned int port_state;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -202,12 +202,12 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
-static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- const unsigned out_port = (port > 2) ? port + 1 : port;
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int out_port = (port > 2) ? port + 1 : port;
unsigned long flags;
raw_spin_lock_irqsave(&dio48egpio->lock, flags);
@@ -306,7 +306,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
-static int dio48e_irq_set_type(struct irq_data *data, unsigned flow_type)
+static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
const unsigned long offset = irqd_to_hwirq(data);
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 08171431bb8f..34e35b64dcdc 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -37,31 +37,6 @@ struct gpio_aggregator {
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
-static char *get_arg(char **args)
-{
- char *start, *end;
-
- start = skip_spaces(*args);
- if (!*start)
- return NULL;
-
- if (*start == '"') {
- /* Quoted arg */
- end = strchr(++start, '"');
- if (!end)
- return ERR_PTR(-EINVAL);
- } else {
- /* Unquoted arg */
- for (end = start; *end && !isspace(*end); end++) ;
- }
-
- if (*end)
- *end++ = '\0';
-
- *args = end;
- return start;
-}
-
static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
int hwnum, unsigned int *n)
{
@@ -83,8 +58,8 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
static int aggr_parse(struct gpio_aggregator *aggr)
{
+ char *args = skip_spaces(aggr->args);
char *name, *offsets, *p;
- char *args = aggr->args;
unsigned long *bitmap;
unsigned int i, n = 0;
int error = 0;
@@ -93,13 +68,9 @@ static int aggr_parse(struct gpio_aggregator *aggr)
if (!bitmap)
return -ENOMEM;
- for (name = get_arg(&args), offsets = get_arg(&args); name;
- offsets = get_arg(&args)) {
- if (IS_ERR(name)) {
- pr_err("Cannot get GPIO specifier: %pe\n", name);
- error = PTR_ERR(name);
- goto free_bitmap;
- }
+ args = next_arg(args, &name, &p);
+ while (*args) {
+ args = next_arg(args, &offsets, &p);
p = get_options(offsets, 0, &error);
if (error == 0 || *p) {
@@ -125,7 +96,7 @@ static int aggr_parse(struct gpio_aggregator *aggr)
goto free_bitmap;
}
- name = get_arg(&args);
+ args = next_arg(args, &name, &p);
}
if (!n) {
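
The driver drops its private get_arg() in favour of the kernel's next_arg() tokenizer, which already skips whitespace and strips quotes. A minimal sketch of the loop shape, assuming a writable, NUL-terminated buffer:

#include <linux/kernel.h>	/* next_arg() */
#include <linux/string.h>	/* skip_spaces() */

/* Sketch: next_arg() consumes one token per call and advances the cursor. */
static void example_tokenize(char *buf)
{
	char *args = skip_spaces(buf);
	char *param, *val;

	while (*args) {
		args = next_arg(args, &param, &val);
		pr_info("token: '%s'\n", param);	/* surrounding quotes already removed */
	}
}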
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index de56c013a658..3b31f5e9bf40 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -5,13 +5,11 @@
* Copyright (C) 2010 Extreme Engineering Solutions.
*/
-
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/ioport.h>
#include <linux/mfd/lpc_ich.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#define DRV_NAME "gpio_ich"
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 8f1be34953ce..f332341fd4c8 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -125,14 +125,6 @@ static inline int superio_inw(int reg)
return val;
}
-static inline void superio_outw(int val, int reg)
-{
- outb(reg++, REG);
- outb(val >> 8, VAL);
- outb(reg, REG);
- outb(val, VAL);
-}
-
static inline void superio_set_mask(int mask, int reg)
{
u8 curr_val = superio_inb(reg);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 28b757d34046..d7e73876a3b9 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -479,15 +479,10 @@ static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
static void gpio_mockup_unregister_pdevs(void)
{
- struct platform_device *pdev;
int i;
- for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
- pdev = gpio_mockup_pdevs[i];
-
- if (pdev)
- platform_device_unregister(pdev);
- }
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++)
+ platform_device_unregister(gpio_mockup_pdevs[i]);
}
static __init char **gpio_mockup_make_line_names(const char *label,
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 6dfca83bcd90..4b9157a69fca 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,6 +9,7 @@
* kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -18,6 +19,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/property.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/gpio/driver.h>
@@ -303,8 +306,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct gpio_chip *gc;
- const struct mpc8xxx_gpio_devtype *devtype =
- of_device_get_match_data(&pdev->dev);
+ const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct fwnode_handle *fwnode;
int ret;
mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -315,14 +318,14 @@ static int mpc8xxx_probe(struct platform_device *pdev)
raw_spin_lock_init(&mpc8xxx_gc->lock);
- mpc8xxx_gc->regs = of_iomap(np, 0);
- if (!mpc8xxx_gc->regs)
- return -ENOMEM;
+ mpc8xxx_gc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mpc8xxx_gc->regs))
+ return PTR_ERR(mpc8xxx_gc->regs);
gc = &mpc8xxx_gc->gc;
gc->parent = &pdev->dev;
- if (of_property_read_bool(np, "little-endian")) {
+ if (device_property_read_bool(&pdev->dev, "little-endian")) {
ret = bgpio_init(gc, &pdev->dev, 4,
mpc8xxx_gc->regs + GPIO_DAT,
NULL, NULL,
@@ -345,6 +348,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
mpc8xxx_gc->direction_output = gc->direction_output;
+ devtype = device_get_match_data(&pdev->dev);
if (!devtype)
devtype = &mpc8xxx_gpio_devtype_default;
@@ -369,24 +373,29 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* associated input enable must be set (GPIOxGPIE[IEn]=1) to propagate
* the port value to the GPIO Data Register.
*/
+ fwnode = dev_fwnode(&pdev->dev);
if (of_device_is_compatible(np, "fsl,qoriq-gpio") ||
of_device_is_compatible(np, "fsl,ls1028a-gpio") ||
- of_device_is_compatible(np, "fsl,ls1088a-gpio"))
+ of_device_is_compatible(np, "fsl,ls1088a-gpio") ||
+ is_acpi_node(fwnode))
gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
ret = gpiochip_add_data(gc, mpc8xxx_gc);
if (ret) {
- pr_err("%pOF: GPIO chip registration failed with status %d\n",
- np, ret);
+ dev_err(&pdev->dev,
+ "GPIO chip registration failed with status %d\n", ret);
goto err;
}
- mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
+ mpc8xxx_gc->irqn = platform_get_irq(pdev, 0);
if (!mpc8xxx_gc->irqn)
return 0;
- mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
- &mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
+ mpc8xxx_gc->irq = irq_domain_create_linear(fwnode,
+ MPC8XXX_GPIO_PINS,
+ &mpc8xxx_gpio_irq_ops,
+ mpc8xxx_gc);
+
if (!mpc8xxx_gc->irq)
return 0;
@@ -399,8 +408,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
IRQF_SHARED, "gpio-cascade",
mpc8xxx_gc);
if (ret) {
- dev_err(&pdev->dev, "%s: failed to devm_request_irq(%d), ret = %d\n",
- np->full_name, mpc8xxx_gc->irqn, ret);
+ dev_err(&pdev->dev,
+ "failed to devm_request_irq(%d), ret = %d\n",
+ mpc8xxx_gc->irqn, ret);
goto err;
}
@@ -425,12 +435,21 @@ static int mpc8xxx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id gpio_acpi_ids[] = {
+ {"NXP0031",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, gpio_acpi_ids);
+#endif
+
static struct platform_driver mpc8xxx_plat_driver = {
.probe = mpc8xxx_probe,
.remove = mpc8xxx_remove,
.driver = {
.name = "gpio-mpc8xxx",
.of_match_table = mpc8xxx_gpio_ids,
+ .acpi_match_table = ACPI_PTR(gpio_acpi_ids),
},
};
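
With an ACPI ID table in place, the OF-only of_device_get_match_data() lookup no longer suffices; device_get_match_data() resolves .data from the OF table or .driver_data from the ACPI table, whichever enumerated the device. A condensed sketch using the driver's names from the hunk above:

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
	const struct mpc8xxx_gpio_devtype *devtype =
		device_get_match_data(&pdev->dev);

	if (!devtype)
		devtype = &mpc8xxx_gpio_devtype_default;	/* driver's fallback */

	return 0;
}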
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index dfc0c1eb1b33..524b668eb1ac 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -60,11 +60,6 @@ static inline int is_imx23_gpio(struct mxs_gpio_port *port)
return port->devid == IMX23_GPIO;
}
-static inline int is_imx28_gpio(struct mxs_gpio_port *port)
-{
- return port->devid == IMX28_GPIO;
-}
-
/* Note: This driver assumes 32 GPIOs are handled in one register */
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 56152263ab38..ca23f72165ca 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1373,15 +1373,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
const struct omap_gpio_platform_data *pdata;
struct gpio_bank *bank;
struct irq_chip *irqc;
int ret;
- match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+ pdata = device_get_match_data(dev);
- pdata = match ? match->data : dev_get_platdata(dev);
+ pdata = pdata ?: dev_get_platdata(dev);
if (!pdata)
return -EINVAL;
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
new file mode 100644
index 000000000000..cb64fb5a51aa
--- /dev/null
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+/*
+ * Total register block size is 0x1C for one bank of four ports (A, B, C, D).
+ * An optional second bank, with ports E, F, G, and H, may be present, starting
+ * at register offset 0x1C.
+ */
+
+/*
+ * Pin select: (0) "normal", (1) "dedicate peripheral"
+ * Not used on RTL8380/RTL8390; peripheral selection is managed by control bits
+ * in the peripheral registers.
+ */
+#define REALTEK_GPIO_REG_CNR 0x00
+/* Clear bit (0) for input, set bit (1) for output */
+#define REALTEK_GPIO_REG_DIR 0x08
+#define REALTEK_GPIO_REG_DATA 0x0C
+/* Read bit for IRQ status, write 1 to clear IRQ */
+#define REALTEK_GPIO_REG_ISR 0x10
+/* Two bits per GPIO in IMR registers */
+#define REALTEK_GPIO_REG_IMR 0x14
+#define REALTEK_GPIO_REG_IMR_AB 0x14
+#define REALTEK_GPIO_REG_IMR_CD 0x18
+#define REALTEK_GPIO_IMR_LINE_MASK GENMASK(1, 0)
+#define REALTEK_GPIO_IRQ_EDGE_FALLING 1
+#define REALTEK_GPIO_IRQ_EDGE_RISING 2
+#define REALTEK_GPIO_IRQ_EDGE_BOTH 3
+
+#define REALTEK_GPIO_MAX 32
+#define REALTEK_GPIO_PORTS_PER_BANK 4
+
+/**
+ * realtek_gpio_ctrl - Realtek Otto GPIO driver data
+ *
+ * @gc: Associated gpio_chip instance
+ * @base: Base address of the register block for a GPIO bank
+ * @lock: Lock for accessing the IRQ registers and values
+ * @intr_mask: Mask for interrupt lines
+ * @intr_type: Interrupt type selection
+ *
+ * Because the interrupt mask register (IMR) combines the function of IRQ type
+ * selection and masking, two extra values are stored. @intr_mask is used to
+ * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * the selected interrupt types. The logical AND of these values is written to
+ * IMR on changes.
+ */
+struct realtek_gpio_ctrl {
+ struct gpio_chip gc;
+ void __iomem *base;
+ raw_spinlock_t lock;
+ u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
+ u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
+};
+
+/* Expand with more flags as devices with other quirks are added */
+enum realtek_gpio_flags {
+ /*
+ * Allow disabling interrupts, for cases where the port order is
+ * unknown. This may result in a port mismatch between ISR and IMR.
+ * An interrupt would appear to come from a different line than the
+ * line the IRQ handler was assigned to, causing uncaught interrupts.
+ */
+ GPIO_INTERRUPTS_DISABLED = BIT(0),
+};
+
+static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ return container_of(gc, struct realtek_gpio_ctrl, gc);
+}
+
+/*
+ * Normal port order register access
+ *
+ * Port information is stored with the first port at offset 0, followed by the
+ * second, etc. Most registers store one bit per GPIO and use a u8 value per
+ * port. The two interrupt mask registers store two bits per GPIO, so use u16
+ * values.
+ */
+static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
+ unsigned int port, u16 irq_type, u16 irq_mask)
+{
+ iowrite16(irq_type & irq_mask, ctrl->base + REALTEK_GPIO_REG_IMR + 2 * port);
+}
+
+static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
+ unsigned int port, u8 mask)
+{
+ iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + port);
+}
+
+static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
+{
+ return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + port);
+}
+
+/* Set the rising and falling edge mask bits for a GPIO port pin */
+static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
+{
+ return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
+}
+
+static void realtek_gpio_irq_ack(struct irq_data *data)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ irq_hw_number_t line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+
+ realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
+}
+
+static void realtek_gpio_irq_unmask(struct irq_data *data)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+ unsigned long flags;
+ u16 m;
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+ m = ctrl->intr_mask[port];
+ m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+ ctrl->intr_mask[port] = m;
+ realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+static void realtek_gpio_irq_mask(struct irq_data *data)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+ unsigned long flags;
+ u16 m;
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+ m = ctrl->intr_mask[port];
+ m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+ ctrl->intr_mask[port] = m;
+ realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+ unsigned int port = line / 8;
+ unsigned int port_pin = line % 8;
+ unsigned long flags;
+ u16 type, t;
+
+ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ type = REALTEK_GPIO_IRQ_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ type = REALTEK_GPIO_IRQ_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ type = REALTEK_GPIO_IRQ_EDGE_BOTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+ t = ctrl->intr_type[port];
+ t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+ t |= realtek_gpio_imr_bits(port_pin, type);
+ ctrl->intr_type[port] = t;
+ realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ return 0;
+}
+
+static void realtek_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+ unsigned int lines_done;
+ unsigned int port_pin_count;
+ unsigned int irq;
+ unsigned long status;
+ int offset;
+
+ chained_irq_enter(irq_chip, desc);
+
+ for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
+ status = realtek_gpio_read_isr(ctrl, lines_done / 8);
+ port_pin_count = min(gc->ngpio - lines_done, 8U);
+ for_each_set_bit(offset, &status, port_pin_count) {
+ irq = irq_find_mapping(gc->irq.domain, offset);
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(irq_chip, desc);
+}
+
+static int realtek_gpio_irq_init(struct gpio_chip *gc)
+{
+ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ unsigned int port;
+
+ for (port = 0; (port * 8) < gc->ngpio; port++) {
+ realtek_gpio_write_imr(ctrl, port, 0, 0);
+ realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
+ }
+
+ return 0;
+}
+
+static struct irq_chip realtek_gpio_irq_chip = {
+ .name = "realtek-otto-gpio",
+ .irq_ack = realtek_gpio_irq_ack,
+ .irq_mask = realtek_gpio_irq_mask,
+ .irq_unmask = realtek_gpio_irq_unmask,
+ .irq_set_type = realtek_gpio_irq_set_type,
+};
+
+static const struct of_device_id realtek_gpio_of_match[] = {
+ {
+ .compatible = "realtek,otto-gpio",
+ .data = (void *)GPIO_INTERRUPTS_DISABLED,
+ },
+ {
+ .compatible = "realtek,rtl8380-gpio",
+ },
+ {
+ .compatible = "realtek,rtl8390-gpio",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
+
+static int realtek_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int dev_flags;
+ struct gpio_irq_chip *girq;
+ struct realtek_gpio_ctrl *ctrl;
+ u32 ngpios;
+ int err, irq;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_flags = (unsigned int) device_get_match_data(dev);
+
+ ngpios = REALTEK_GPIO_MAX;
+ device_property_read_u32(dev, "ngpios", &ngpios);
+
+ if (ngpios > REALTEK_GPIO_MAX) {
+ dev_err(&pdev->dev, "invalid ngpios (max. %d)\n",
+ REALTEK_GPIO_MAX);
+ return -EINVAL;
+ }
+
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ raw_spin_lock_init(&ctrl->lock);
+
+ err = bgpio_init(&ctrl->gc, dev, 4,
+ ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
+ ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
+ BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (err) {
+ dev_err(dev, "unable to init generic GPIO");
+ return err;
+ }
+
+ ctrl->gc.ngpio = ngpios;
+ ctrl->gc.owner = THIS_MODULE;
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
+ girq = &ctrl->gc.irq;
+ girq->chip = &realtek_gpio_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parent_handler = realtek_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->init_hw = realtek_gpio_irq_init;
+ }
+
+ return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+}
+
+static struct platform_driver realtek_gpio_driver = {
+ .driver = {
+ .name = "realtek-otto-gpio",
+ .of_match_table = realtek_gpio_of_match,
+ },
+ .probe = realtek_gpio_probe,
+};
+module_platform_driver(realtek_gpio_driver);
+
+MODULE_DESCRIPTION("Realtek Otto GPIO support");
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_LICENSE("GPL v2");
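
With two IMR bits per line, one port's eight lines pack into a single u16. A worked sketch of the shift arithmetic in realtek_gpio_imr_bits() above:

/* Sketch: IMR packing, mirroring the driver's helper. */
#define LINE_MASK	0x3	/* REALTEK_GPIO_IMR_LINE_MASK */

static unsigned short example_imr_bits(unsigned int pin, unsigned short value)
{
	return (value & LINE_MASK) << (2 * pin);
}

/*
 * example_imr_bits(5, 1) == 1 << 10 == 0x0400 selects falling-edge for port
 * pin 5; masking the line uses ~example_imr_bits(5, LINE_MASK) to clear
 * bits 11:10 while leaving the other seven lines untouched.
 */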
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 5412cb3b0b2a..134cedf151a7 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -254,6 +254,11 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
+#if defined(CONFIG_OF_GPIO)
+ /* gpiolib will use of_node of the parent if chip->of_node is NULL */
+ chip->of_node = to_of_node(config->fwnode);
+#endif /* CONFIG_OF_GPIO */
+
/*
* If our regmap is fast_io we should probably set can_sleep to false.
* Right now, the regmap doesn't save this property, nor is there any
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 3a1b1adb08c6..a6f0421d6e50 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -7,33 +7,55 @@
*/
#include <linux/acpi.h>
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#define GEN 0x00
#define GIO 0x04
#define GLV 0x08
+#define GTPE 0x0c
+#define GTNE 0x10
+#define GGPE 0x14
+#define GSMI 0x18
+#define GTS 0x1c
+
+#define CORE_BANK_OFFSET 0x00
+#define RESUME_BANK_OFFSET 0x20
+
+/*
+ * iLB datasheet describes GPE0BLK registers, in particular GPE0E.GPIO bit.
+ * Document Number: 328195-001
+ */
+#define GPE0E_GPIO 14
struct sch_gpio {
struct gpio_chip chip;
+ struct irq_chip irqchip;
spinlock_t lock;
unsigned short iobase;
unsigned short resume_base;
+
+ /* GPE handling */
+ u32 gpe;
+ acpi_gpe_handler gpe_handler;
};
static unsigned int sch_gpio_offset(struct sch_gpio *sch, unsigned int gpio,
unsigned int reg)
{
- unsigned int base = 0;
+ unsigned int base = CORE_BANK_OFFSET;
if (gpio >= sch->resume_base) {
gpio -= sch->resume_base;
- base += 0x20;
+ base = RESUME_BANK_OFFSET;
}
return base + reg + gpio / 8;
@@ -79,10 +101,11 @@ static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned int gpio, unsigned i
static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned int gpio_num)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
+ unsigned long flags;
- spin_lock(&sch->lock);
+ spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GIO, 1);
- spin_unlock(&sch->lock);
+ spin_unlock_irqrestore(&sch->lock, flags);
return 0;
}
@@ -96,20 +119,22 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num)
static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
+ unsigned long flags;
- spin_lock(&sch->lock);
+ spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GLV, val);
- spin_unlock(&sch->lock);
+ spin_unlock_irqrestore(&sch->lock, flags);
}
static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
+ unsigned long flags;
- spin_lock(&sch->lock);
+ spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GIO, 0);
- spin_unlock(&sch->lock);
+ spin_unlock_irqrestore(&sch->lock, flags);
/*
* according to the datasheet, writing to the level register has no
@@ -144,10 +169,145 @@ static const struct gpio_chip sch_gpio_chip = {
.get_direction = sch_gpio_get_direction,
};
+static int sch_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ unsigned long flags;
+ int rising, falling;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ rising = 1;
+ falling = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ rising = 0;
+ falling = 1;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ rising = 1;
+ falling = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&sch->lock, flags);
+
+ sch_gpio_reg_set(sch, gpio_num, GTPE, rising);
+ sch_gpio_reg_set(sch, gpio_num, GTNE, falling);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ spin_unlock_irqrestore(&sch->lock, flags);
+
+ return 0;
+}
+
+static void sch_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sch->lock, flags);
+ sch_gpio_reg_set(sch, gpio_num, GTS, 1);
+ spin_unlock_irqrestore(&sch->lock, flags);
+}
+
+static void sch_irq_mask_unmask(struct irq_data *d, int val)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sch->lock, flags);
+ sch_gpio_reg_set(sch, gpio_num, GGPE, val);
+ spin_unlock_irqrestore(&sch->lock, flags);
+}
+
+static void sch_irq_mask(struct irq_data *d)
+{
+ sch_irq_mask_unmask(d, 0);
+}
+
+static void sch_irq_unmask(struct irq_data *d)
+{
+ sch_irq_mask_unmask(d, 1);
+}
+
+static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
+{
+ struct sch_gpio *sch = context;
+ struct gpio_chip *gc = &sch->chip;
+ unsigned long core_status, resume_status;
+ unsigned long pending;
+ unsigned long flags;
+ int offset;
+ u32 ret;
+
+ spin_lock_irqsave(&sch->lock, flags);
+
+ core_status = inl(sch->iobase + CORE_BANK_OFFSET + GTS);
+ resume_status = inl(sch->iobase + RESUME_BANK_OFFSET + GTS);
+
+ spin_unlock_irqrestore(&sch->lock, flags);
+
+ pending = (resume_status << sch->resume_base) | core_status;
+ for_each_set_bit(offset, &pending, sch->chip.ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+
+ /* Set the return value depending on whether we handled an interrupt */
+ ret = pending ? ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
+
+ /* Acknowledge GPE to ACPICA */
+ ret |= ACPI_REENABLE_GPE;
+
+ return ret;
+}
+
+static void sch_gpio_remove_gpe_handler(void *data)
+{
+ struct sch_gpio *sch = data;
+
+ acpi_disable_gpe(NULL, sch->gpe);
+ acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler);
+}
+
+static int sch_gpio_install_gpe_handler(struct sch_gpio *sch)
+{
+ struct device *dev = sch->chip.parent;
+ acpi_status status;
+
+ status = acpi_install_gpe_handler(NULL, sch->gpe, ACPI_GPE_LEVEL_TRIGGERED,
+ sch->gpe_handler, sch);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to install GPE handler for %u: %s\n",
+ sch->gpe, acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ status = acpi_enable_gpe(NULL, sch->gpe);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to enable GPE handler for %u: %s\n",
+ sch->gpe, acpi_format_exception(status));
+ acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler);
+ return -ENODEV;
+ }
+
+ return devm_add_action_or_reset(dev, sch_gpio_remove_gpe_handler, sch);
+}
+
static int sch_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_irq_chip *girq;
struct sch_gpio *sch;
struct resource *res;
+ int ret;
sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
if (!sch)
@@ -207,6 +367,28 @@ static int sch_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sch);
+ sch->irqchip.name = "sch_gpio";
+ sch->irqchip.irq_ack = sch_irq_ack;
+ sch->irqchip.irq_mask = sch_irq_mask;
+ sch->irqchip.irq_unmask = sch_irq_unmask;
+ sch->irqchip.irq_set_type = sch_irq_type;
+
+ girq = &sch->chip.irq;
+ girq->chip = &sch->irqchip;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ /* GPE setup is optional */
+ sch->gpe = GPE0E_GPIO;
+ sch->gpe_handler = sch_gpio_gpe_handler;
+
+ ret = sch_gpio_install_gpe_handler(sch);
+ if (ret)
+ dev_warn(&pdev->dev, "Can't setup GPE, no IRQ support\n");
+
return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch);
}
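
sch_gpio_gpe_handler() must both report whether the event was consumed and ask ACPICA to re-enable the level-triggered GPE, or the line stays masked. A minimal sketch of just that return-value contract:

#include <linux/acpi.h>

/* Sketch: the ACPI GPE handler return convention used above. */
static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
{
	bool handled = true;	/* would be derived from the GTS status bits */
	u32 ret = handled ? ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;

	return ret | ACPI_REENABLE_GPE;	/* without this the GPE remains disabled */
}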
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 1aacd2a5a1fd..3ef22a3c104d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1291,6 +1291,13 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
kfree(acpi_gpio);
}
+void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
+{
+ /* Set default fwnode to parent's one if present */
+ if (gc->parent)
+ ACPI_COMPANION_SET(&gdev->dev, ACPI_COMPANION(gc->parent));
+}
+
static int acpi_gpio_package_count(const union acpi_object *obj)
{
const union acpi_object *element = obj->package.elements;
@@ -1440,6 +1447,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
* HP X2 10 models with Cherry Trail SoC + TI PMIC use an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FF:01 pin 0, causing spurious wakeups.
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index e2edb632b2cc..e476558d9471 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -36,6 +36,8 @@ struct acpi_gpio_info {
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
+void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
+
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
@@ -58,6 +60,8 @@ int acpi_gpio_count(struct device *dev, const char *con_id);
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
+static inline void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { }
+
static inline void
acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index baf0153b7bca..bbcc7c073f63 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1042,11 +1042,13 @@ void of_gpiochip_remove(struct gpio_chip *chip)
void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
{
+ /* Set default OF node to parent's one if present */
+ if (gc->parent)
+ gdev->dev.of_node = gc->parent->of_node;
+
/* If the gpiochip has an assigned OF node this takes precedence */
if (gc->of_node)
gdev->dev.of_node = gc->of_node;
else
gc->of_node = gdev->dev.of_node;
- if (gdev->dev.of_node)
- gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 6367646dce83..1427c1be749b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -586,14 +586,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (!gdev)
return -ENOMEM;
gdev->dev.bus = &gpio_bus_type;
+ gdev->dev.parent = gc->parent;
gdev->chip = gc;
gc->gpiodev = gdev;
- if (gc->parent) {
- gdev->dev.parent = gc->parent;
- gdev->dev.of_node = gc->parent->of_node;
- }
of_gpio_dev_init(gc, gdev);
+ acpi_gpio_dev_init(gc, gdev);
/*
* Assign fwnode depending on the result of the previous calls,
@@ -1465,9 +1463,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
+ struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev);
struct irq_chip *irqchip = gc->irq.chip;
- const struct irq_domain_ops *ops = NULL;
- struct device_node *np;
unsigned int type;
unsigned int i;
@@ -1479,7 +1476,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
return -EINVAL;
}
- np = gc->gpiodev->dev.of_node;
type = gc->irq.default_type;
/*
@@ -1487,15 +1483,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
* used to configure the interrupts, as you may end up with
* conflicting triggers. Tell the user, and reset to NONE.
*/
- if (WARN(np && type != IRQ_TYPE_NONE,
- "%s: Ignoring %u default trigger\n", np->full_name, type))
- type = IRQ_TYPE_NONE;
-
- if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gc->parent),
- "Ignoring %u default trigger\n", type);
+ if (WARN(fwnode && type != IRQ_TYPE_NONE,
+ "%pfw: Ignoring %u default trigger\n", fwnode, type))
type = IRQ_TYPE_NONE;
- }
if (gc->to_irq)
chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
@@ -1512,15 +1502,11 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
return ret;
} else {
/* Some drivers provide custom irqdomain ops */
- if (gc->irq.domain_ops)
- ops = gc->irq.domain_ops;
-
- if (!ops)
- ops = &gpiochip_domain_ops;
- gc->irq.domain = irq_domain_add_simple(np,
+ gc->irq.domain = irq_domain_create_simple(fwnode,
gc->ngpio,
gc->irq.first,
- ops, gc);
+ gc->irq.domain_ops ?: &gpiochip_domain_ops,
+ gc);
if (!gc->irq.domain)
return -EINVAL;
}
@@ -3684,11 +3670,12 @@ EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index);
*/
int gpiod_count(struct device *dev, const char *con_id)
{
+ const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
int count = -ENOENT;
- if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
+ if (is_of_node(fwnode))
count = of_gpio_get_count(dev, con_id);
- else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev))
+ else if (is_acpi_node(fwnode))
count = acpi_gpio_count(dev, con_id);
if (count < 0)
@@ -3826,18 +3813,17 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
int ret;
/* Maybe we have a device name, maybe not */
const char *devname = dev ? dev_name(dev) : "?";
+ const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
- if (dev) {
- /* Using device tree? */
- if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- dev_dbg(dev, "using device tree for GPIO lookup\n");
- desc = of_find_gpio(dev, con_id, idx, &lookupflags);
- } else if (ACPI_COMPANION(dev)) {
- dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags);
- }
+ /* Using device tree? */
+ if (is_of_node(fwnode)) {
+ dev_dbg(dev, "using device tree for GPIO lookup\n");
+ desc = of_find_gpio(dev, con_id, idx, &lookupflags);
+ } else if (is_acpi_node(fwnode)) {
+ dev_dbg(dev, "using ACPI for GPIO lookup\n");
+ desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags);
}
/*
@@ -3921,9 +3907,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
struct gpio_desc *desc = ERR_PTR(-ENODEV);
int ret;
- if (!fwnode)
- return ERR_PTR(-EINVAL);
-
if (is_of_node(fwnode)) {
desc = gpiod_get_from_of_node(to_of_node(fwnode),
propname, index,
@@ -3939,7 +3922,8 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(&dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
- }
+ } else
+ return ERR_PTR(-EINVAL);
/* Currently only ACPI takes this path */
ret = gpiod_request(desc, label);
@@ -4220,11 +4204,13 @@ EXPORT_SYMBOL_GPL(gpiod_put_array);
static int gpio_bus_match(struct device *dev, struct device_driver *drv)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
/*
* Only match if the fwnode doesn't already have a proper struct device
* created for it.
*/
- if (dev->fwnode && dev->fwnode->dev != dev)
+ if (fwnode && fwnode->dev != dev)
return 0;
return 1;
}
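
The gpiolib changes above replace separate of_node and ACPI-handle checks with a single firmware-node dispatch idiom. A minimal sketch of that idiom, using the helpers named in this patch (the branch bodies are placeholders):

	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;

	if (is_of_node(fwnode)) {
		/* device tree path, e.g. of_gpio_get_count(dev, con_id) */
	} else if (is_acpi_node(fwnode)) {
		/* ACPI path, e.g. acpi_gpio_count(dev, con_id) */
	}
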
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 14f68c028126..5ffb07b02810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -234,14 +234,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
})
/* GPUVM API */
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
- void **vm, void **process_info,
- struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e93850f2f3b1..7d4118c8128a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -445,22 +445,19 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev) {
- mapping_flags |= AMDGPU_VM_MTYPE_RW;
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
if (adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
snoop = true;
- if (adev->gmc.xgmi.connected_to_cpu)
- /* system memory uses NC on A+A */
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- else
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
default:
@@ -1037,41 +1034,6 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
- void **vm, void **process_info,
- struct dma_fence **ef)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *new_vm;
- int ret;
-
- new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
- if (!new_vm)
- return -ENOMEM;
-
- /* Initialize AMDGPU part of the VM */
- ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
- if (ret) {
- pr_err("Failed init vm ret %d\n", ret);
- goto amdgpu_vm_init_fail;
- }
-
- /* Initialize KFD part of the VM and process info */
- ret = init_kfd_vm(new_vm, process_info, ef);
- if (ret)
- goto init_kfd_vm_fail;
-
- *vm = (void *) new_vm;
-
- return 0;
-
-init_kfd_vm_fail:
- amdgpu_vm_fini(adev, new_vm);
-amdgpu_vm_init_fail:
- kfree(new_vm);
- return ret;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
@@ -1138,21 +1100,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
-
- if (WARN_ON(!kgd || !vm))
- return;
-
- pr_debug("Destroying process vm %p\n", vm);
-
- /* Release the VM context */
- amdgpu_vm_fini(adev, avm);
- kfree(vm);
-}
-
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 9a2f811450ed..2e622c1675d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -908,6 +908,19 @@ int amdgpu_display_gem_fb_verify_and_init(
&amdgpu_fb_funcs);
if (ret)
goto err;
+ /* Verify that the modifier is supported. */
+ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+ drm_dbg_kms(dev,
+ "unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->modifier[0]);
+
+ ret = -EINVAL;
+ goto err;
+ }
ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e0c4f7c7f1b9..baa980a477d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -291,8 +291,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
- dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
+ bo->tbo.base.size, attach->dev, dir, &sgt);
if (r)
return ERR_PTR(r);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index d8f131ed10cb..922938931e1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -185,7 +185,7 @@ uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
.timeout_fatal_disable = false,
- .period = 0x23, /* default to max. timeout = 1 << 0x23 cycles */
+	.period = 0x0, /* default to 0x0 (timeout disabled) */
};
/**
@@ -553,7 +553,7 @@ module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_di
* DOC: timeout_period (uint)
* Modify the watchdog timeout max_cycles as (1 << period)
*/
-MODULE_PARM_DESC(timeout_period, "watchdog timeout period (1 to 0x23(default), timeout maxCycles = (1 << period)");
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxCycles = (1 << period))");
module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
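
A worked example of the new parameter semantics (values illustrative):

	/*
	 * timeout_period = 0x14 -> max timeout = 1 << 0x14 = 0x100000 cycles
	 * timeout_period = 0x23 -> max timeout = 1 << 0x23 cycles (old default)
	 * timeout_period = 0    -> watchdog timeout disabled (new default)
	 */
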
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 4c5c19820d37..4f10c4529840 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -205,7 +205,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL;
int ret;
- unsigned long tmp;
memset(&mode_cmd, 0, sizeof(mode_cmd));
mode_cmd.width = sizes->surface_width;
@@ -246,8 +245,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->fbops = &amdgpufb_ops;
- tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
- info->fix.smem_start = adev->gmc.aper_base + tmp;
+ info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo);
info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 4d32233cde92..c39ed9eb0987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -487,6 +487,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -497,7 +498,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
"Trusted Memory Zone (TMZ) feature enabled\n");
}
break;
- case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
@@ -661,8 +661,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
u64 vram_addr = adev->vm_manager.vram_base_offset -
adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
u64 vram_end = vram_addr + vram_size;
- u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) +
- adev->vm_manager.vram_base_offset - adev->gmc.vram_start;
+ u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
flags |= AMDGPU_PTE_WRITEABLE;
@@ -685,3 +684,39 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
/* Requires gart_ptb_gpu_pa to be 4K aligned */
amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}
+
+/**
+ * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
+ * address
+ *
+ * @adev: amdgpu_device pointer
+ * @mc_addr: MC address of buffer
+ */
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
+{
+ return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+}
+
+/**
+ * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
+ * GPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
+ */
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
+}
+
+/**
+ * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
+ * from CPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
+ */
+uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
+}
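
The three helpers differ only in which base address they rebase onto. A hypothetical numeric walk-through (all addresses invented for illustration):

	/*
	 * Assume: gmc.vram_start              = 0x8000000000 (MC view)
	 *         vm_manager.vram_base_offset = 0x0          (GPU physical)
	 *         gmc.aper_base               = 0xe0000000   (CPU BAR)
	 *
	 * For a BO at MC address 0x8000100000:
	 *   amdgpu_gmc_vram_mc2pa()  -> 0x00100000 (GPU physical address)
	 *   amdgpu_gmc_vram_cpu_pa() -> 0xe0100000 (CPU physical address)
	 */
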
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index cbb7735c6988..9d11c02a3938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -218,15 +218,6 @@ struct amdgpu_gmc {
*/
u64 fb_start;
u64 fb_end;
- /* In the case of use GART table for vmid0 FB access, [fb_start, fb_end]
- * will be squeezed to GART aperture. But we have a PSP FW issue to fix
- * for now. To temporarily workaround the PSP FW issue, added below two
- * variables to remember the original fb_start/end to re-enable FB
- * aperture to workaround the PSP FW issue. Will delete it after we
- * get a proper PSP FW fix.
- */
- u64 fb_start_original;
- u64 fb_end_original;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
@@ -341,4 +332,7 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 148a3b481b12..a2fe2dac32c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -76,6 +76,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
+	/* flush the cache before committing the IB */
+ ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 19c0a3655228..82e9ecf84352 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
GFP_KERNEL);
- if (!pmu_entry->pmu.attr_groups)
+ if (!pmu_entry->pmu.attr_groups) {
+ ret = -ENOMEM;
goto err_attr_group;
+ }
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
adev_to_drm(pmu_entry->adev)->primary->index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9e769cf6095b..a09483beb968 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -328,8 +328,12 @@ psp_cmd_submit_buf(struct psp_context *psp,
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
- uint64_t tmr_mc, uint32_t size)
+ uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t size = amdgpu_bo_size(tmr_bo);
+ uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
+
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
@@ -337,6 +341,9 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
+ cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}
static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -407,16 +414,6 @@ static int psp_tmr_init(struct psp_context *psp)
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
- /* workaround the tmr_mc_addr:
- * PSP requires an address in FB aperture. Right now driver produce
- * tmr_mc_addr in the GART aperture. Convert it back to FB aperture
- * for PSP. Will revert it after we get a fix from PSP FW.
- */
- if (psp->adev->asic_type == CHIP_ALDEBARAN) {
- psp->tmr_mc_addr -= psp->adev->gmc.fb_start;
- psp->tmr_mc_addr += psp->adev->gmc.fb_start_original;
- }
-
return ret;
}
@@ -466,8 +463,7 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
- amdgpu_bo_size(psp->tmr_bo));
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
@@ -561,7 +557,7 @@ static int psp_boot_config_set(struct amdgpu_device *adev)
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
- if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ if (adev->asic_type != CHIP_SIENNA_CICHLID || amdgpu_sriov_vf(adev))
return 0;
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 0541196ae1ed..b0d2fc9454ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -114,7 +114,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_ras_check_bad_page(adev, address)) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has been marked as bad page!\n",
+			 "RAS WARN: 0x%llx has already been marked as a bad page!\n",
address);
return 0;
}
@@ -221,18 +221,17 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 1;
else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
op = 2;
- else if (sscanf(str, "retire_page") == 0)
+ else if (strstr(str, "retire_page") != NULL)
op = 3;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
if (op != -1) {
-
if (op == 3) {
- if (sscanf(str, "%*s %llu", &address) != 1)
- if (sscanf(str, "%*s 0x%llx", &address) != 1)
- return -EINVAL;
+ if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
+ sscanf(str, "%*s %llu", &address) != 1)
+ return -EINVAL;
data->op = op;
data->inject.address = address;
@@ -255,11 +254,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->op = op;
if (op == 2) {
- if (sscanf(str, "%*s %*s %*s %u %llu %llu",
- &sub_block, &address, &value) != 3)
- if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
- &sub_block, &address, &value) != 3)
- return -EINVAL;
+ if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+ &sub_block, &address, &value) != 3 &&
+ sscanf(str, "%*s %*s %*s %u %llu %llu",
+ &sub_block, &address, &value) != 3)
+ return -EINVAL;
data->head.sub_block_index = sub_block;
data->inject.address = address;
data->inject.value = value;
@@ -278,7 +277,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
/**
* DOC: AMDGPU RAS debugfs control interface
*
- * It accepts struct ras_debug_if who has two members.
+ * The control interface accepts struct ras_debug_if, which has two members.
*
* First member: ras_debug_if::head or ras_debug_if::inject.
*
@@ -303,32 +302,33 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
*
* How to use the interface?
*
- * Programs
+ * In a program
*
- * Copy the struct ras_debug_if in your codes and initialize it.
- * Write the struct to the control node.
+ * Copy the struct ras_debug_if in your code and initialize it.
+ * Write the struct to the control interface.
*
- * Shells
+ * From a shell
*
* .. code-block:: bash
*
- * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
*
- * Parameters:
+ * Where <N> is the index of the card you want to affect.
*
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, sdma, gfx, .........
+ * "disable" requires only the block.
+ * "enable" requires the block and error type.
+ * "inject" requires the block, error type, address, and value.
+ * The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block:
- * sub block index, pass 0 if there is no sub block
+ * The error type is one of: ue, ce, where
+ * ue is multi-uncorrectable and
+ * ce is single-correctable.
+ * The sub-block is the sub-block index; pass 0 if there is no sub-block.
+ * The address and value are hexadecimal numbers; the leading 0x is optional.
*
- * here are some examples for bash commands:
+ * For instance,
*
* .. code-block:: bash
*
@@ -336,17 +336,17 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
*
- * How to check the result?
+ * How to check the result of the operation?
*
- * For disable/enable, please check ras features at
+ * To check disable/enable, see the "ras" features at:
* /sys/class/drm/card[0/1/2...]/device/ras/features
*
- * For inject, please check corresponding err count at
- * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ * To check inject, see the corresponding error count at:
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
*
* .. note::
* Operations are only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
* to see which blocks support RAS on a particular asic.
*
*/
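
A minimal user-space sketch of the programmatic path described in the DOC above. The struct layout (a head/inject union plus an op field) is copied from amdgpu_ras.h, as the DOC suggests; the field values below are placeholders and error handling is omitted:

	#include <fcntl.h>
	#include <unistd.h>

	/* struct ras_debug_if copied from amdgpu_ras.h, per the DOC above */

	struct ras_debug_if data = { 0 };
	int fd;

	data.op = 1;		/* 1 = enable, per the parser above */
	data.head.block = 0;	/* block id, e.g. umc; see ras_block_string[] */
	data.head.type = 0;	/* error type, per enum amdgpu_ras_error_type */

	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
	if (fd >= 0) {
		write(fd, &data, sizeof(data));
		close(fd);
	}
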
@@ -367,11 +367,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
if (ret)
return -EINVAL;
- if (data.op == 3)
- {
+ if (data.op == 3) {
ret = amdgpu_reserve_page_direct(adev, data.inject.address);
-
- if (ret)
+ if (!ret)
return size;
else
return ret;
@@ -503,6 +501,12 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
+
+ if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+ if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+			DRM_WARN("Failed to reset error counter and error status\n");
+ }
+
return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
"ce", info.ce_count);
}
@@ -1269,6 +1273,8 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
&amdgpu_ras_debugfs_ctrl_ops);
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
&amdgpu_ras_debugfs_eeprom_ops);
+ debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
+ &con->bad_page_cnt_threshold);
/*
* After one uncorrectable error happens, usually GPU recovery will
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index dec0db8b0b13..9e38475e0f8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -112,6 +112,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
+			      u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 592a2dd16493..bce105e2973e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
+#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
@@ -565,6 +566,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
*
* @adev: amdgpu device pointer
* @mem: TTM memory object
+ * @offset: byte offset from the base of VRAM BO
+ * @length: number of bytes to export in sg_table
* @dev: the other device
* @dir: dma direction
* @sgt: resulting sg table
@@ -573,39 +576,47 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
+ u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt)
{
- struct drm_mm_node *node;
+ struct amdgpu_res_cursor cursor;
struct scatterlist *sg;
int num_entries = 0;
- unsigned int pages;
int i, r;
*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
- for (pages = mem->num_pages, node = mem->mm_node;
- pages; pages -= node->size, ++node)
- ++num_entries;
+ /* Determine the number of DRM_MM nodes to export */
+ amdgpu_res_first(mem, offset, length, &cursor);
+ while (cursor.remaining) {
+ num_entries++;
+ amdgpu_res_next(&cursor, cursor.size);
+ }
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
if (r)
goto error_free;
+ /* Initialize scatterlist nodes of sg_table */
for_each_sgtable_sg((*sgt), sg, i)
sg->length = 0;
- node = mem->mm_node;
+	/*
+	 * Walk down DRM_MM nodes to populate the scatterlist nodes.
+	 * Note: use the iterator API to get the first DRM_MM node
+	 * and the number of bytes from it, then access the following
+	 * DRM_MM node(s) if more of the buffer needs to be exported.
+	 */
+ amdgpu_res_first(mem, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = (node->start << PAGE_SHIFT) +
- adev->gmc.aper_base;
- size_t size = node->size << PAGE_SHIFT;
+ phys_addr_t phys = cursor.start + adev->gmc.aper_base;
+ size_t size = cursor.size;
dma_addr_t addr;
- ++node;
addr = dma_map_resource(dev, phys, size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
r = dma_mapping_error(dev, addr);
@@ -615,7 +626,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg_set_page(sg, NULL, size, 0);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
+
+ amdgpu_res_next(&cursor, cursor.size);
}
+
return 0;
error_unmap:
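
With the new offset/length parameters a caller can export just a sub-range of a VRAM BO; the dma-buf hunk above passes 0 and the full BO size. A hypothetical partial export (offsets illustrative):

	struct sg_table *sgt;
	int r;

	/* skip the first page of the BO, map the next two pages */
	r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem,
				      PAGE_SIZE, 2 * PAGE_SIZE,
				      attach->dev, DMA_BIDIRECTIONAL, &sgt);
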
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 06811a1f4625..a078a38c2cee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1587,6 +1587,9 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = 0;
adev->gfx.mec2_fw = NULL;
}
+ } else {
+ adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
+ adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index 830080ff90d8..b4789dfc2bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -994,7 +994,7 @@ static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
-static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
@@ -1007,15 +1007,21 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_ea_err_status_regs.instance;
j++) {
gfx_v9_4_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
- gfx_v9_4_rdrsp_status_regs));
- if (reg_value)
+ gfx_v9_4_ea_err_status_regs));
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+				/* An SDP read/write response error or parity error in
+				 * FUE_IS_FATAL mode can cause a fatal system error on
+				 * Arcturus. Harvest the error status before GPU reset. */
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 9ca76a3ac38c..a30c7c10cd9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -808,7 +808,7 @@ static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
};
-static const struct soc15_reg_entry gfx_v9_4_2_rdrsp_status_regs =
+static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 };
static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
@@ -997,8 +997,9 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
blk->clear);
/* print the edc count */
- gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
- ded_cnt);
+ if (sec_cnt || ded_cnt)
+ gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
+ ded_cnt);
}
}
@@ -1040,11 +1041,11 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
uint32_t i, j;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -1089,17 +1090,20 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
- gfx_v9_4_2_rdrsp_status_regs));
- if (reg_value)
+ gfx_v9_4_2_ea_err_status_regs));
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
+ }
/* clear after read */
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
}
}
@@ -1112,19 +1116,19 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
uint32_t data;
data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d189507dcef0..1e4678cb98f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -120,8 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -141,21 +140,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* FB aperture and AGP aperture. Disable them.
*/
if (adev->gmc.pdb0_bo) {
- if (adev->asic_type == CHIP_ALDEBARAN) {
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
- } else {
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
- }
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 2aecc6a243e8..14c1c1a297dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -165,8 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 410fd3a1a388..41807817de7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -164,8 +164,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 2bfd620576f2..498b28a35f5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -568,8 +568,7 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
- *addr = adev->vm_manager.vram_base_offset + *addr -
- adev->gmc.vram_start;
+ *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c82d82da2c73..455bb91060d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -574,7 +574,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* be updated to avoid reading an incorrect value due to
* the new fast GRBM interface.
*/
- if (entry->vmid_src == AMDGPU_GFXHUB_0)
+ if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
+ (adev->asic_type < CHIP_ALDEBARAN))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -802,7 +803,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* be cleared to avoid a false ACK due to the new fast
* GRBM interface.
*/
- if (vmhub == AMDGPU_GFXHUB_0)
+ if ((vmhub == AMDGPU_GFXHUB_0) &&
+ (adev->asic_type < CHIP_ALDEBARAN))
RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng);
@@ -1048,8 +1050,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
- *addr = adev->vm_manager.vram_base_offset + *addr -
- adev->gmc.vram_start;
+ *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index aa9be5612c89..a99953833820 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -114,8 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
return;
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 7977a7879b32..0103a5ab28e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -47,8 +47,6 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
adev->gmc.fb_start = base;
adev->gmc.fb_end = top;
- adev->gmc.fb_start_original = base;
- adev->gmc.fb_end_original = top;
return base;
}
@@ -126,17 +124,16 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
if (amdgpu_sriov_vf(adev))
return;
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -1287,7 +1284,7 @@ static void mmhub_v1_7_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-static const struct soc15_reg_entry mmhub_v1_7_err_status_regs[] = {
+static const struct soc15_reg_entry mmhub_v1_7_ea_err_status_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 },
@@ -1304,12 +1301,15 @@ static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
return;
- for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_err_status_regs); i++) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) {
reg_value =
- RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_err_status_regs[i]));
- if (reg_value)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i]));
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index da7edd1ed6b2..ac76081b91d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -210,8 +210,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 1141c37432f0..a9899335d0b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -162,8 +162,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0cffa820ea6e..47c8dd9d1c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -136,8 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
@@ -1646,9 +1645,15 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
- if (reg_value)
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+			/* An SDP read/write response error or parity error in
+			 * FUE_IS_FATAL mode can cause a fatal system error on
+			 * Arcturus. Harvest the error status before GPU reset. */
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 83ea063388fd..0d2d629e2d6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -31,6 +31,28 @@
#include "vega10_enum.h"
#include <uapi/linux/kfd_ioctl.h>
+#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
+#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123530
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnRCC_BIF_STRAP2 0x10123488
+#define smnRCC_BIF_STRAP3 0x1012348c
+#define smnRCC_BIF_STRAP5 0x10123494
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+
static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -256,6 +278,111 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
+static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v6_1_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
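
Both program_aspm implementations rely on the same read-modify-write idiom, so a strap or link-control register is only written when a field actually changes. Restated in isolation (reg and the *_MASK names are placeholders; this is not a drop-in function):

	uint32_t def, data;

	def = data = RREG32_PCIE(reg);	/* snapshot the current value */
	data |= SOME_FIELD_MASK;	/* modify the cached copy */
	data &= ~OTHER_FIELD_MASK;
	if (def != data)		/* write back only on change */
		WREG32_PCIE(reg, data);
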
+
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
@@ -274,4 +401,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
.remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
+ .program_aspm = nbio_v6_1_program_aspm,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index af44aad78171..cef929746739 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -31,7 +31,26 @@
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
+#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
+#define smnRCC_BIF_STRAP3 0x1012348c
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+#define smnRCC_BIF_STRAP5 0x10123494
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
+#define smnRCC_BIF_STRAP2 0x10123488
+#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
/*
* These are nbio v7_4_1 registers mask. Temporarily define these here since
@@ -567,6 +586,111 @@ const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
.ras_fini = amdgpu_nbio_ras_fini,
};
+static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v7_4_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
@@ -587,4 +711,5 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .program_aspm = nbio_v7_4_program_aspm,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 46d4bbabce75..d54af7f8801b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -601,8 +601,7 @@ static void nv_program_aspm(struct amdgpu_device *adev)
if (amdgpu_aspm != 1)
return;
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- !(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
adev->nbio.funcs->program_aspm(adev);
@@ -934,12 +933,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
- /*
- * The ASPM function is not fully enabled and verified on
- * Navi yet. Temporarily skip this until ASPM enabled.
- */
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- !(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->enable_aspm))
adev->nbio.funcs->enable_aspm(adev, !enter);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index dd4d65f7e0f0..96064c343163 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -185,10 +185,19 @@ struct psp_gfx_cmd_setup_tmr
uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */
+ union {
+ struct {
+			uint32_t	sriov_enabled:1; /* whether the device runs under SR-IOV */
+			uint32_t	virt_phy_addr:1; /* driver passes both virtual and physical address to PSP */
+ uint32_t reserved:30;
+ } bitfield;
+ uint32_t tmr_flags;
+ };
+ uint32_t system_phy_addr_lo; /* bits [31:0] of system physical address of TMR buffer (must be 4 KB aligned) */
+ uint32_t system_phy_addr_hi; /* bits [63:32] of system physical address of TMR buffer */
};
-
/* FW types for GFX_CMD_ID_LOAD_IP_FW command. Limit 31. */
enum psp_gfx_fw_type {
GFX_FW_TYPE_NONE = 0, /* */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index 6fcb95c89999..bf95007f0843 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -160,6 +160,7 @@ static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
};
static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
+ uint32_t reg_offset,
uint32_t value,
uint32_t instance,
uint32_t *sec_count)
@@ -169,6 +170,9 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
	/* double bit (multiple bits) error detection is not supported */
for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) {
+ if (sdma_v4_4_ras_fields[i].reg_offset != reg_offset)
+ continue;
+
/* the SDMA_EDC_COUNTER register in each sdma instance
* shares the same sed shift_mask
* */
@@ -197,13 +201,30 @@ static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value = RREG32(reg_offset);
/* double bit error is not supported */
if (reg_value)
- sdma_v4_4_get_ras_error_count(adev, reg_value, instance, &sec_count);
- /* err_data->ce_count should be initialized to 0
- * before calling into this function */
- err_data->ce_count += sec_count;
- /* double bit error is not supported
- * set ue count to 0 */
- err_data->ue_count = 0;
+ sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER, reg_value,
+ instance, &sec_count);
+
+ reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER2);
+ reg_value = RREG32(reg_offset);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER2, reg_value,
+ instance, &sec_count);
+
+ /*
+ * err_data->ue_count should be initialized to 0
+ * before calling into this function
+ *
+ * SDMA RAS supports single bit uncorrectable error detection.
+ * So, increment uncorrectable error count.
+ */
+ err_data->ue_count += sec_count;
+
+ /*
+ * SDMA RAS does not support correctable errors.
+ * Set ce count to 0.
+ */
+ err_data->ce_count = 0;
return 0;
};
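
The sdma_v4_4 change above threads a reg_offset through the counting helper so each EDC counter register is matched only against its own table entries. A condensed sketch of that filter, with struct ras_field as a simplified stand-in for soc15_ras_field_entry:

	#include <stddef.h>
	#include <stdint.h>

	struct ras_field {                   /* simplified soc15_ras_field_entry */
		uint32_t reg_offset;
		uint32_t sec_count_mask;
		unsigned int sec_count_shift;
	};

	/* Sum SEC counts from one counter value, skipping entries that
	 * describe a different register. */
	static uint32_t count_sec(const struct ras_field *fields, size_t n,
				  uint32_t reg_offset, uint32_t value)
	{
		uint32_t sec = 0;

		for (size_t i = 0; i < n; i++) {
			if (fields[i].reg_offset != reg_offset)
				continue;
			sec += (value & fields[i].sec_count_mask) >>
			       fields[i].sec_count_shift;
		}
		return sec;
	}

	int main(void)
	{
		const struct ras_field fields[] = {
			{ .reg_offset = 1, .sec_count_mask = 0x0f, .sec_count_shift = 0 },
			{ .reg_offset = 2, .sec_count_mask = 0xf0, .sec_count_shift = 4 },
		};

		/* Only the reg_offset == 1 entry matches, so this returns 3. */
		return count_sec(fields, 2, 1, 0x33) == 3 ? 0 : 1;
	}
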
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 93f826a7d3f0..b1ad9e52b234 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -370,6 +370,33 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
+ * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * flush the IB by graphics cache rinse.
+ */
+static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ uint32_t gcr_cntl =
+ SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
+ SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
+
+	/* flush entire cache L0/L1/L2; this can be tuned based on performance requirements */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
+ SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
+}
+
+/**
* sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
@@ -1663,6 +1690,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
.emit_ib = sdma_v5_2_ring_emit_ib,
+ .emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
.emit_fence = sdma_v5_2_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
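
The GCR request emitted above carries a 19-bit control word split across two payload dwords: bits [15:0] go into PAYLOAD2 and bits [18:16] into PAYLOAD3. A small arithmetic check of that split, assuming the field widths implied by the packet macros:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t gcr_cntl = 0x7abcd;            /* any 19-bit control word */
		uint32_t lo = gcr_cntl & 0xffff;        /* GCR_CONTROL_15_0 */
		uint32_t hi = (gcr_cntl >> 16) & 0x7;   /* GCR_CONTROL_18_16 */

		assert(((hi << 16) | lo) == gcr_cntl);  /* round-trips losslessly */
		return 0;
	}
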
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 5c5eb3aed1b3..d80e12b80c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -816,11 +816,12 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
static void soc15_program_aspm(struct amdgpu_device *adev)
{
-
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if (!(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->program_aspm))
+ adev->nbio.funcs->program_aspm(adev);
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index ea338de5818a..735ebbd1148f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,6 +81,30 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
+#define ixPCIE_L1_PM_SUB_CNTL 0x378
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
+#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
+#define LINK_CAP 0x64
+#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define ixCPM_CONTROL 0x1400118
+#define ixPCIE_LC_CNTL7 0x100100BC
+#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009
+#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
+#define PCIE_L1_PM_SUB_CNTL 0x378
+#define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \
+ (asic_type <= CHIP_POLARIS12) && \
+ (rid >= 0x6E))
/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
@@ -1091,13 +1115,178 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
/* todo */
}
+static void vi_enable_aspm(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
+ PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
+ PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+}
+
static void vi_program_aspm(struct amdgpu_device *adev)
{
+ u32 data, data1, orig;
+ bool bL1SS = false;
+ bool bClkReqSupport = true;
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if (adev->flags & AMD_IS_APU ||
+ adev->asic_type < CHIP_POLARIS10)
+ return;
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
+ data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
+
+ data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
+ pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
+ if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
+ (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
+ bL1SS = true;
+ } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
+ bL1SS = true;
+ }
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL6, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
+
+ pci_read_config_dword(adev->pdev, LINK_CAP, &data);
+ if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
+ bClkReqSupport = false;
+
+ if (bClkReqSupport) {
+ orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
+ (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixTHM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
+ data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
+ MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
+ (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
+ data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
+ data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+ data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
+ if (orig != data)
+			WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
+
+ orig = data = RREG32_PCIE(ixCPM_CONTROL);
+ data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
+ CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
+ if (orig != data)
+ WREG32_PCIE(ixCPM_CONTROL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
+ data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
+ data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
+
+ orig = data = RREG32(mmBIF_CLK_CTRL);
+ data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
+ if (orig != data)
+ WREG32(mmBIF_CLK_CTRL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL7, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
+ data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_HW_DEBUG, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
+ if (bL1SS)
+ data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL2, data);
+
+ }
+
+ vi_enable_aspm(adev);
+
+ data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
+ data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
+ data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+
+ if ((adev->asic_type == CHIP_POLARIS12 &&
+ !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
+ ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
+ data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
+ }
}
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
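
The vi_program_aspm() hunk above derives bL1SS from two sources: the controller's L1 PM substate override register, or the standard L1 PM substates control in PCI config space. A condensed sketch of that decision, with mask values mirroring the defines added at the top of the file:

	#include <stdbool.h>
	#include <stdint.h>

	static bool l1ss_enabled(uint32_t lc_substate, uint32_t pm_sub_cntl)
	{
		const uint32_t override_en   = 0x00000001; /* LC_L1_SUBSTATES_OVERRIDE_EN */
		const uint32_t override_bits = 0x0000001e; /* PCI-PM/ASPM L1.1 + L1.2 overrides */
		const uint32_t cfg_en_bits   = 0x0000000f; /* PCI-PM/ASPM L1.1 + L1.2 enables */

		if ((lc_substate & override_en) && (lc_substate & override_bits))
			return true;
		return (pm_sub_cntl & cfg_en_bits) != 0;
	}

	int main(void)
	{
		return l1ss_enabled(0x3, 0x0) ? 0 : 1;  /* override path taken */
	}
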
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 1c20458f3962..696944fa0177 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -25,6 +25,70 @@
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
+#include "amdgpu.h"
+
+enum SQ_INTERRUPT_WORD_ENCODING {
+ SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
+ SQ_INTERRUPT_WORD_ENCODING_INST,
+ SQ_INTERRUPT_WORD_ENCODING_ERROR,
+};
+
+enum SQ_INTERRUPT_ERROR_TYPE {
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
+ SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
+ SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
+};
+
+/* SQ_INTERRUPT_WORD_AUTO_CTXID */
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26
+
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000
+
+/* SQ_INTERRUPT_WORD_WAVE_CTXID */
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26
+
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000
+
+#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
+ ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))
+
+#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
+#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -108,13 +172,15 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
- uint32_t context_id;
+ uint32_t context_id0, context_id1;
+ uint32_t sq_intr_err, sq_int_data, encoding;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
client_id == SOC15_IH_CLIENTID_SE0SH ||
@@ -122,10 +188,59 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_SE2SH ||
client_id == SOC15_IH_CLIENTID_SE3SH) {
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, context_id, 32);
- else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_event_interrupt(pasid, context_id0, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
+ sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1);
+ encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+ pr_debug(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+ pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ sq_int_data);
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+ pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ sq_intr_err);
+ if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
+ sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
+ kfd_signal_hw_exception_event(pasid);
+ amdgpu_amdkfd_gpu_reset(dev->kgd);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
@@ -136,7 +251,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_SDMA6 ||
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP)
- kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
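
KFD_CONTEXT_ID_GET_SQ_INT_DATA above reassembles the 24-bit SQ interrupt payload from pieces of the two IH context words: bits [11:0] come from context_id0[11:0], bits [15:12] from context_id0[31:28], and bits [23:16] from context_id1[7:0]. A worked example of that packing:

	#include <assert.h>
	#include <stdint.h>

	#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
		((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))

	int main(void)
	{
		uint32_t ctx0 = 0xA0000123;  /* [11:0] = 0x123, [31:28] = 0xA */
		uint32_t ctx1 = 0x45;        /* [7:0]  = 0x45 */

		assert(KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) == 0x45A123);
		return 0;
	}
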
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5a1f2433632b..73f2257acc23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -333,10 +333,6 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
return 0;
}
-extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
-
/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
*/
int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d4241d29ea94..d97e330a5022 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -935,9 +935,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->dev->kgd, pdd->vm);
fput(pdd->drm_file);
}
- else if (pdd->vm)
- amdgpu_amdkfd_gpuvm_destroy_process_vm(
- pdd->dev->kgd, pdd->vm);
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
@@ -1375,19 +1372,18 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct kfd_dev *dev;
int ret;
+ if (!drm_file)
+ return -EINVAL;
+
if (pdd->vm)
- return drm_file ? -EBUSY : 0;
+ return -EBUSY;
p = pdd->process;
dev = pdd->dev;
- if (drm_file)
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
- dev->kgd, drm_file, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
- else
- ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
+ dev->kgd, drm_file, p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
@@ -1409,8 +1405,6 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
err_init_cwsr:
err_reserve_ib_mem:
kfd_process_device_free_bos(pdd);
- if (!drm_file)
- amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
pdd->vm = NULL;
return ret;
@@ -1435,6 +1429,9 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+ if (!pdd->vm)
+ return ERR_PTR(-ENODEV);
+
/*
* signal runtime-pm system to auto resume and prevent
* further runtime suspend once device pdd is created until
@@ -1452,10 +1449,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
if (err)
goto out;
- err = kfd_process_device_init_vm(pdd, NULL);
- if (err)
- goto out;
-
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3c3b3441ad2..b34ab76c5f4c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2552,11 +2552,14 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct amdgpu_device *adev = drm_to_adev(dev);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
@@ -2696,6 +2699,10 @@ static void handle_hpd_rx_irq(void *param)
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
@@ -4225,6 +4232,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
const struct drm_format_info *info = drm_format_info(format);
+ int i;
enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
@@ -4232,11 +4240,22 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return false;
/*
- * We always have to allow this modifier, because core DRM still
- * checks LINEAR support if userspace does not provide modifers.
+ * We always have to allow these modifiers:
+ * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
+ * 2. Not passing any modifiers is the same as explicitly passing INVALID.
*/
- if (modifier == DRM_FORMAT_MOD_LINEAR)
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_INVALID) {
return true;
+ }
+
+ /* Check that the modifier is on the list of the plane's supported modifiers. */
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return false;
/*
* For D swizzle the canonical modifier depends on the bpp, so check
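
dm_plane_format_mod_supported() above now treats LINEAR and INVALID as always allowed and checks every other modifier against the plane's advertised list. A condensed sketch of that allow-list logic; the two modifier constants here are placeholder values, not the real DRM fourcc encodings:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MOD_LINEAR  0ULL     /* placeholder for DRM_FORMAT_MOD_LINEAR */
	#define MOD_INVALID ~0ULL    /* placeholder for DRM_FORMAT_MOD_INVALID */

	static bool modifier_allowed(uint64_t modifier,
				     const uint64_t *mods, size_t count)
	{
		if (modifier == MOD_LINEAR || modifier == MOD_INVALID)
			return true;            /* always allowed */

		for (size_t i = 0; i < count; i++)
			if (mods[i] == modifier)
				return true;    /* on the plane's list */
		return false;
	}

	int main(void)
	{
		const uint64_t mods[] = { 0x100, 0x200 };

		return modifier_allowed(0x100, mods, 2) ? 0 : 1;
	}
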
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 018943113025..b2f2ccfc20bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -410,6 +410,7 @@ struct amdgpu_display_manager {
*/
struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
bool force_timing_sync;
+ bool disable_hpd_irq;
bool dmcub_trace_event_en;
/**
* @da_list:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a13f47022df..529545045a3e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3077,6 +3077,37 @@ static int force_timing_sync_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
force_timing_sync_set, "%llu\n");
+
+/*
+ * Disables all HPD and HPD RX interrupt handling in the
+ * driver when set to 1. Default is 0.
+ */
+static int disable_hpd_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.disable_hpd_irq = (bool)val;
+
+ return 0;
+}
+
+/*
+ * Returns 1 if HPD and HPD RX interrupt handling is disabled,
+ * 0 otherwise.
+ */
+static int disable_hpd_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.disable_hpd_irq;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
+ disable_hpd_set, "%llu\n");
+
/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -3213,4 +3244,8 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
&dcc_en_bits_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
+ &disable_hpd_ops);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 60f91853bd82..616f5b1ea3a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -434,6 +434,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
int link_index = aconnector->dc_link->link_index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+ struct drm_connector_state *conn_state;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -459,8 +460,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 3;
link->adjust.hdcp1.disable = 0;
+ conn_state = aconnector->base.state;
- hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
+ pr_debug("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
+		aconnector->base.state ? aconnector->base.state->content_protection : -1,
+		aconnector->base.state ? aconnector->base.state->hdcp_content_type : -1);
+
+ hdcp_update_display(hdcp_work, link_index, aconnector, conn_state->hdcp_content_type, false);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 103e29905b57..e8b325a828c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -711,3 +711,28 @@ bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enab
enable ? "en" : "dis", ret);
return ret;
}
+
+void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
+{
+ /* TODO: virtual DPCD */
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ if (link->aux_access_disabled)
+ return;
+
+ if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw,
+ sizeof(old_downspread)))
+ return;
+
+ new_downspread.raw = old_downspread.raw;
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw)
+ dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw,
+ sizeof(new_downspread));
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 73cdb9fe981a..9b221db526dc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -229,6 +229,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
(aconnector->edid->extensions + 1) * EDID_LENGTH,
&init_params);
+ if (!dc_sink) {
+ DRM_ERROR("Unable to add a remote sink\n");
+ return 0;
+ }
+
dc_sink->priv = aconnector;
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
@@ -745,8 +750,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp,
- dsc_policy.max_target_bpp,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 887a54246bde..a06e86853bb9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -128,7 +128,7 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count, i;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -210,6 +210,14 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dppclk_khz,
safe_to_lower);
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->signal == SIGNAL_TYPE_EDP &&
+ context->streams[i]->apply_seamless_boot_optimization) {
+ dc_wait_for_vblank(dc, context->streams[i]);
+ break;
+ }
+ }
+
clk_mgr_base->clks.actual_dppclk_khz =
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
@@ -761,6 +769,43 @@ static struct wm_table ddr4_wm_table_rn = {
}
};
+static struct wm_table ddr4_1R_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ }
+};
+
static struct wm_table lpddr4_wm_table_rn = {
.entries = {
{
@@ -945,8 +990,12 @@ void rn_clk_mgr_construct(
} else {
if (is_green_sardine)
rn_bw_params.wm_table = ddr4_wm_table_gs;
- else
- rn_bw_params.wm_table = ddr4_wm_table_rn;
+ else {
+ if (ctx->dc->config.is_single_rank_dimm)
+ rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
+ }
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
@@ -964,6 +1013,9 @@ void rn_clk_mgr_construct(
if (status == PP_SMU_RESULT_OK &&
ctx->dc_bios && ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+		/* treat memory config as single channel if memory is asymmetric. */
+ if (ctx->dc->config.is_asymmetric_memory)
+ clk_mgr->base.bw_params->num_channels = 1;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 577e7f97045e..652fa89fae5f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -432,7 +432,7 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
}
-static bool dcn3_is_smu_prsent(struct clk_mgr *clk_mgr_base)
+static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return clk_mgr->smu_present;
@@ -500,7 +500,7 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_prsent
+ .is_smu_present = dcn3_is_smu_present
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8f0a13807d05..4713f09bcbf1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -55,6 +55,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
+#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -1322,11 +1323,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_link *link = sink->link;
unsigned int i, enc_inst, tg_inst = 0;
- // Seamless port only support single DP and EDP so far
- if ((sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
- sink->sink_signal != SIGNAL_TYPE_EDP) ||
- sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+	/* Support seamless boot on eDP displays only */
+	if (sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
@@ -1399,6 +1399,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
+ /* block DSC for now, as VBIOS does not currently support DSC timings */
+ if (crtc_timing->flags.DSC)
+ return false;
+
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz;
@@ -1429,6 +1433,11 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
return false;
}
+ if (is_edp_ilr_optimization_required(link, crtc_timing)) {
+ DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
+ return false;
+ }
+
return true;
}
@@ -2678,6 +2687,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
}
}
@@ -2821,7 +2834,8 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
!pipe_ctx->stream || pipe_ctx->stream != stream ||
- !pipe_ctx->plane_state->update_flags.bits.addr_update)
+ !pipe_ctx->plane_state->update_flags.bits.addr_update ||
+ pipe_ctx->plane_state->skip_manual_trigger)
continue;
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
@@ -3205,6 +3219,19 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
}
}
+void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ struct timing_generator *tg =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ break;
+ }
+}
+
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 29bc2874f6a7..f4374d83662a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1679,21 +1679,27 @@ void link_destroy(struct dc_link **link)
static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- union down_spread_ctrl old_downspread;
- union down_spread_ctrl new_downspread;
- core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
- &old_downspread.raw, sizeof(old_downspread));
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw, sizeof(old_downspread));
- new_downspread.raw = old_downspread.raw;
+ new_downspread.raw = old_downspread.raw;
- new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
- (stream->ignore_msa_timing_param) ? 1 : 0;
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw) {
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw, sizeof(new_downspread));
+ }
- if (new_downspread.raw != old_downspread.raw) {
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &new_downspread.raw, sizeof(new_downspread));
+ } else {
+ dm_helpers_mst_enable_stream_features(stream);
}
}
@@ -2813,12 +2819,9 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/*skip power down the single pipe since it blocks the cstate*/
- if ((link->ctx->asic_id.chip_family == FAMILY_RV) &&
- ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ if (link->ctx->asic_id.chip_family >= FAMILY_RV)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
/* SMU will perform additional powerdown sequence.
* For unsupported ASICs, set psr_level flag to skip PSR
@@ -3139,50 +3142,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
-enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
-
- // Clear all of MST payload then reallocate
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-
- /* driver enable split pipe for external monitors
- * we have to check pipe_ctx is split pipe or not
- * If it's split pipe, driver using top pipe to
- * reaallocate.
- */
- if (!pipe_ctx || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- deallocate_mst_payload(pipe_ctx);
- }
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* enable/disable PHY will clear connection between BE and FE
- * need to restore it.
- */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
- dc_link_allocate_mst_payload(pipe_ctx);
- }
- }
-
- return DC_OK;
-}
#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
@@ -3296,7 +3255,8 @@ void core_link_enable_stream(
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- apply_edp_fast_boot_optimization) {
+ apply_edp_fast_boot_optimization &&
+ !pipe_ctx->stream->timing.flags.DSC) {
pipe_ctx->stream->dpms_off = false;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
@@ -3358,8 +3318,10 @@ void core_link_enable_stream(
/* Set DPS PPS SDP (AKA "info frames") */
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_on_rx(pipe_ctx, true);
dp_set_dsc_pps_sdp(pipe_ctx, true);
+ }
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -3754,7 +3716,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
- link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP
+ (link->connector_signal == SIGNAL_TYPE_EDP &&
+ link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP
is_fec_disable = true;
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7d2e433c2275..3ff3d9e90983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1132,11 +1132,6 @@ static inline enum link_training_result perform_link_training_int(
enum link_training_result status)
{
union lane_count_set lane_count_set = { {0} };
- union dpcd_training_pattern dpcd_pattern = { {0} };
-
- /* 3. set training not in progress*/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
- dpcd_set_training_pattern(link, dpcd_pattern);
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1560,6 +1555,7 @@ enum link_training_result dc_link_dp_perform_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
struct link_training_settings lt_settings;
+ union dpcd_training_pattern dpcd_pattern = { { 0 } };
bool fec_enable;
uint8_t repeater_cnt;
@@ -1624,6 +1620,9 @@ enum link_training_result dc_link_dp_perform_link_training(
}
}
+ /* 3. set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ dpcd_set_training_pattern(link, dpcd_pattern);
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
status = perform_link_training_int(link,
&lt_settings,
@@ -2490,7 +2489,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
return false;
}
-static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting;
struct dc_link_settings current_link_setting;
@@ -3582,6 +3581,8 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
if (is_lttpr_present)
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
if (!is_lttpr_present)
@@ -3892,7 +3893,7 @@ void detect_edp_sink_caps(struct dc_link *link)
memset(supported_link_rates, 0, sizeof(supported_link_rates));
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dc->config.optimize_edp_link_rate ||
+ (link->dc->debug.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -4718,3 +4719,51 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
}
return false;
}
+
+bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing)
+{
+ struct dc_link_settings link_setting;
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t req_bw;
+ union lane_count_set lane_count_set = { {0} };
+
+	ASSERT(link && crtc_timing); // invalid input
+
+ if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ !link->dc->debug.optimize_edp_link_rate)
+ return false;
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
+ return true;
+ }
+
+ // Read DPCD 00115h to find the edp link rate set used
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+
+ decide_edp_link_settings(link, &link_setting, req_bw);
+
+ if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
+ lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
+ return true;
+ }
+
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
+ return false;
+}
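
is_edp_ilr_optimization_required() above boils down to two tests once the DPCD values are in hand: VBIOS using the legacy LINK_BW_SET path always forces a retrain, and otherwise a retrain is needed only when the VBIOS-chosen rate or lane count differs from what decide_edp_link_settings() would pick. A condensed sketch of that decision:

	#include <stdbool.h>
	#include <stdint.h>

	static bool ilr_retrain_needed(uint8_t vbios_link_bw_set,
				       uint32_t vbios_rate, uint32_t optimal_rate,
				       uint8_t vbios_lanes, uint8_t optimal_lanes)
	{
		if (vbios_link_bw_set)       /* legacy fixed-rate path: retrain */
			return true;
		return vbios_rate != optimal_rate || vbios_lanes != optimal_lanes;
	}

	int main(void)
	{
		/* VBIOS picked the optimal ILR settings: no retrain needed. */
		return ilr_retrain_needed(0, 2, 2, 4, 4) ? 1 : 0;
	}
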
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 48ad1a8d4a74..b426f878fb99 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -431,7 +431,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
DC_LOG_DSC("\tslice_width %d", config->slice_width);
}
-static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -541,7 +541,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
goto out;
if (enable) {
- if (dp_set_dsc_on_rx(pipe_ctx, true)) {
+ {
dp_set_dsc_on_stream(pipe_ctx, true);
result = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ac7a75887f95..8cb937c046aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2506,26 +2506,31 @@ static void set_avi_info_frame(
hdmi_info.bits.ITC = itc_value;
}
+ if (stream->qs_bit == 1) {
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ else
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ } else
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+
/* TODO : We should handle YCC quantization */
/* but we do not have matrix calculation */
- if (stream->qs_bit == 1 &&
- stream->qy_bit == 1) {
+ if (stream->qy_bit == 1) {
if (color_space == COLOR_SPACE_SRGB ||
- color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ else
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- }
- } else {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- }
+ } else
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
///VIC
format = stream->timing.timing_3d_format;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8108b82bac60..100d434f7a03 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
-#define DC_VER "3.2.130"
+#define DC_VER "3.2.132"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -293,7 +293,6 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
- bool optimize_edp_link_rate;
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
@@ -309,6 +308,8 @@ struct dc_config {
#endif
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
+ bool is_asymmetric_memory;
+ bool is_single_rank_dimm;
};
enum visual_confirm {
@@ -541,6 +542,11 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
+ bool optimize_edp_link_rate; /* eDP ILR */
+ /* force enable edp FEC */
+ bool force_enable_edp_fec;
+ /* FEC/PSR1 sequence enable delay in 100us */
+ uint8_t fec_enable_delay_in100us;
};
struct dc_debug_data {
@@ -713,6 +719,7 @@ void dc_init_callbacks(struct dc *dc,
void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
+void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream);
/*******************************************************************************
* Surface Interfaces
******************************************************************************/
@@ -900,6 +907,8 @@ struct dc_plane_state {
union surface_update_flags update_flags;
bool flip_int_enabled;
+ bool skip_manual_trigger;
+
/* private to DC core */
struct dc_plane_status status;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index b0013e674864..054bab45ee17 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -276,7 +276,6 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
-enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b0297f07f9de..13dae7238a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -238,7 +238,6 @@ struct dc_stream_state {
bool apply_seamless_boot_optimization;
uint32_t stream_id;
- bool is_dsc_enabled;
struct test_pattern test_pattern;
union stream_update_flags update_flags;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 28ff059aa7f3..5e99553fcdd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -284,6 +284,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->debug.u32All = 0;
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
+ copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);
+ copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 873c6f2d2cd9..5ddeee96bf23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -48,6 +48,7 @@
#include "stream_encoder.h"
#include "link_encoder.h"
#include "link_hwss.h"
+#include "dc_link_dp.h"
#include "clock_source.h"
#include "clk_mgr.h"
#include "abm.h"
@@ -1694,6 +1695,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ DC_LOGGER_INIT();
+
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
get_edp_links(dc, edp_links, &edp_num);
@@ -1714,8 +1717,11 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
/* Set optimization flag on eDP stream*/
if (edp_stream_num && edp_link->link_status.link_active) {
edp_stream = edp_streams[0];
- edp_stream->apply_edp_fast_boot_optimization = true;
- can_apply_edp_fast_boot = true;
+ can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
+ edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
+			if (!can_apply_edp_fast_boot)
+ DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
+
break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index bec7059f6d5d..a1318c31bcfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-17 Advanced Micro Devices, Inc.
+ * Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (pipe_dest->htotal != 0) {
+ if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ value = 1;
+ } else
+ value = 0;
+ }
+
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index f65a6904d09c..527e56c353cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2201,10 +2201,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
- if (true) /* todo */
- pipes[pipe_cnt].dout.output_format = dm_s422;
- else
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC &&
+ !res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple)
pipes[pipe_cnt].dout.output_format = dm_n422;
+ else
+ pipes[pipe_cnt].dout.output_format = dm_s422;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index d3b643089603..8fccee5a3036 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -218,6 +218,8 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst);
cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
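
The new panel_mask field is a one-hot selector over panel control hardware instances: bit N addresses instance N, mirroring the (0x01 << panel_cntl->inst) computation above. A minimal standalone sketch of the encoding (the helper name is hypothetical, not driver code):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: build the one-hot panel selector the firmware expects. */
static uint8_t panel_mask_for(unsigned int panel_inst)
{
	return (uint8_t)(0x01 << panel_inst); /* bit N = panel control HW instance N */
}

int main(void)
{
	assert(panel_mask_for(0) == 0x01); /* instance 0 -> bit 0 */
	assert(panel_mask_for(1) == 0x02); /* instance 1 -> bit 1 */
	return 0;
}
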
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 0d90523c7cdc..70b053d9ba40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -99,6 +99,8 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index f41db27c44de..7617fab9e1f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -147,6 +147,8 @@ bool dm_helpers_dp_write_dsc_enable(
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
+void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream);
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index b970a32177af..3ae05c96d557 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -52,6 +52,10 @@ bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
+bool decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw);
+
void decide_link_settings(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
@@ -71,6 +75,8 @@ void detect_edp_sink_caps(struct dc_link *link);
bool is_dp_active_dongle(const struct dc_link *link);
+bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing);
+
void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
@@ -86,5 +92,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
+
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 44003836fafd..4195ff10c514 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x7f2db1846
+#define DMUB_FW_VERSION_GIT_HASH 0x23db9b126
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 59
+#define DMUB_FW_VERSION_REVISION 62
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -121,6 +121,16 @@
#define TRACE_BUFFER_ENTRY_OFFSET 16
/**
+ * Legacy ABM backlight control version
+ */
+#define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_UNKNOWN 0x0
+
+/**
+ * ABM backlight control version with multi-eDP support
+ */
+#define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1 0x1
+
+/**
* Physical framebuffer address location, 64-bit.
*/
#ifndef PHYSICAL_ADDRESS_LOC
@@ -1625,6 +1635,23 @@ struct dmub_cmd_abm_set_backlight_data {
* Requested backlight level from user.
*/
uint32_t backlight_user_level;
+
+ /**
+ * Backlight data version.
+ */
+ uint8_t version;
+
+ /**
+ * Panel Control HW instance mask.
+ * Bit 0 is Panel Control HW instance 0.
+ * Bit 1 is Panel Control HW instance 1.
+ */
+ uint8_t panel_mask;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
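
The explicit pad[2] at the end of the backlight data keeps the payload a multiple of 4 bytes with no compiler-inserted tail gap, since payload_bytes is taken from sizeof() on the struct. A standalone sketch mirroring the field layout above (not the real DMUB header) that checks the invariant at compile time:

#include <stdint.h>
#include <stdio.h>

struct example_backlight_data {
	uint32_t frame_ramp;            /* 4 bytes */
	uint32_t backlight_user_level;  /* 4 bytes */
	uint8_t  version;               /* 1 byte  */
	uint8_t  panel_mask;            /* 1 byte  */
	uint8_t  pad[2];                /* pads 10 bytes up to 12 */
};

_Static_assert(sizeof(struct example_backlight_data) % 4 == 0,
	       "command payload must stay 4-byte aligned");

int main(void)
{
	printf("payload_bytes = %zu\n", sizeof(struct example_backlight_data));
	return 0;
}
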
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index eeac14300a2a..2cbd931363bd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -427,8 +427,6 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_is_link_encryption_enabled(hdcp))
- goto out;
if (status == MOD_HDCP_STATUS_SUCCESS)
mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index f164f6a5d4dc..c1331facdcb4 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -564,8 +564,6 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_is_link_encryption_enabled(hdcp))
- goto out;
process_rxstatus(hdcp, event_ctx, input, &status);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 9d7ca316dc3f..26f96c05e0ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
index 4089cfa081f5..849450caca15 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -617,6 +617,22 @@
#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L
#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+
// addressBlock: gc_gfxudec
//GRBM_GFX_INDEX
#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
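
Each GCEA_ERR_STATUS field above comes as a SHIFT/MASK pair; consumers read the register, mask the field, then shift it down. A standalone sketch using two of the pairs (the register value is invented for the example):

#include <stdint.h>
#include <stdio.h>

#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK   0x0000000FL
#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT         0xd
#define GCEA_ERR_STATUS__FUE_FLAG_MASK           0x00002000L

int main(void)
{
	uint32_t reg = 0x00002003; /* hypothetical GCEA_ERR_STATUS readback */
	uint32_t rdrsp = (reg & GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK) >>
			 GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT;
	uint32_t fue = (reg & GCEA_ERR_STATUS__FUE_FLAG_MASK) >>
		       GCEA_ERR_STATUS__FUE_FLAG__SHIFT;

	printf("SDP_RDRSP_STATUS=%u FUE_FLAG=%u\n", rdrsp, fue); /* prints 3 and 1 */
	return 0;
}
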
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index c77ed38c20fb..f2564ba21c0b 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -3336,6 +3336,47 @@ enum atom_smu11_syspll3_1_clock_id {
SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
};
+enum atom_smu12_syspll_id {
+ SMU12_SYSPLL0_ID = 0,
+ SMU12_SYSPLL1_ID = 1,
+ SMU12_SYSPLL2_ID = 2,
+ SMU12_SYSPLL3_0_ID = 3,
+ SMU12_SYSPLL3_1_ID = 4,
+};
+
+enum atom_smu12_syspll0_clock_id {
+  SMU12_SYSPLL0_SMNCLK_ID = 0,				//	SMNCLK
+ SMU12_SYSPLL0_SOCCLK_ID = 1, // SOCCLK
+ SMU12_SYSPLL0_MP0CLK_ID = 2, // MP0CLK
+ SMU12_SYSPLL0_MP1CLK_ID = 3, // MP1CLK
+ SMU12_SYSPLL0_MP2CLK_ID = 4, // MP2CLK
+ SMU12_SYSPLL0_VCLK_ID = 5, // VCLK
+ SMU12_SYSPLL0_LCLK_ID = 6, // LCLK
+ SMU12_SYSPLL0_DCLK_ID = 7, // DCLK
+ SMU12_SYSPLL0_ACLK_ID = 8, // ACLK
+ SMU12_SYSPLL0_ISPCLK_ID = 9, // ISPCLK
+ SMU12_SYSPLL0_SHUBCLK_ID = 10, // SHUBCLK
+};
+
+enum atom_smu12_syspll1_clock_id {
+ SMU12_SYSPLL1_DISPCLK_ID = 0, // DISPCLK
+ SMU12_SYSPLL1_DPPCLK_ID = 1, // DPPCLK
+ SMU12_SYSPLL1_DPREFCLK_ID = 2, // DPREFCLK
+ SMU12_SYSPLL1_DCFCLK_ID = 3, // DCFCLK
+};
+
+enum atom_smu12_syspll2_clock_id {
+ SMU12_SYSPLL2_Pre_GFXCLK_ID = 0, // Pre_GFXCLK
+};
+
+enum atom_smu12_syspll3_0_clock_id {
+ SMU12_SYSPLL3_0_FCLK_ID = 0, // FCLK
+};
+
+enum atom_smu12_syspll3_1_clock_id {
+ SMU12_SYSPLL3_1_UMCCLK_ID = 0, // UMCCLK
+};
+
struct atom_get_smu_clock_info_output_parameters_v3_1
{
union {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 204e34549013..8128603ef495 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1844,7 +1844,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (asic_type < CHIP_VEGA10)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+ if (asic_type < CHIP_VEGA10 ||
+ asic_type == CHIP_ARCTURUS ||
+ asic_type == CHIP_ALDEBARAN)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
if (asic_type < CHIP_VEGA20)
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 6e23a3f803a7..8361ebd8d876 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -26,7 +26,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU13_DRIVER_IF_VERSION 2
+#define SMU13_DRIVER_IF_VERSION 3
typedef struct {
int32_t value;
@@ -191,6 +191,44 @@ typedef struct {
uint16_t SocTemperature; //[centi-Celsius]
uint16_t EdgeTemperature;
uint16_t ThrottlerStatus;
+} SmuMetrics_legacy_t;
+
+typedef struct {
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t SocclkFrequency; //[MHz]
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+ uint16_t MemclkFrequency; //[MHz]
+ uint16_t spare;
+
+ uint16_t GfxActivity; //[centi]
+ uint16_t UvdActivity; //[centi]
+ uint16_t C0Residency[4]; //percentage
+
+ uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t CurrentSocketPower; //[mW]
+
+ //3rd party tools in Windows need info in the case of APUs
+ uint16_t CoreFrequency[4]; //[MHz]
+ uint16_t CorePower[4]; //[mW]
+ uint16_t CoreTemperature[4]; //[centi-Celsius]
+ uint16_t L3Frequency[1]; //[MHz]
+ uint16_t L3Temperature[1]; //[centi-Celsius]
+
+ uint16_t GfxTemperature; //[centi-Celsius]
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t EdgeTemperature;
+ uint16_t ThrottlerStatus;
+} SmuMetricsTable_t;
+
+typedef struct {
+ SmuMetricsTable_t Current;
+ SmuMetricsTable_t Average;
+ //uint32_t AccCnt;
+ uint32_t SampleStartTime;
+ uint32_t SampleStopTime;
} SmuMetrics_t;
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index d5182bbaa598..bb55a96f98e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -32,7 +32,7 @@
#define SMU11_DRIVER_IF_VERSION_NV14 0x38
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
-#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
+#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
/* MP Apertures */
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index 02de3b6199e5..1ad2dff71090 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e0eb7ca112e2..c29d8b3131b7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2221,6 +2221,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
+ ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7bcd35840bf2..77f532a49e37 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -194,18 +194,34 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+	int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+		dev_err(adev->dev, "Failed to get SMU interface version!\n");
+ goto err0_out;
+ }
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+
+ if (if_version < 0x3) {
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
+ } else {
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ }
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -235,13 +251,12 @@ err0_out:
return -ENOMEM;
}
-static int vangogh_get_smu_metrics_data(struct smu_context *smu,
+static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
-
- SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
mutex_lock(&smu->metrics_lock);
@@ -255,7 +270,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
}
switch (member) {
- case METRICS_AVERAGE_GFXCLK:
+ case METRICS_CURR_GFXCLK:
*value = metrics->GfxclkFrequency;
break;
case METRICS_AVERAGE_SOCCLK:
@@ -267,7 +282,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_DCLK:
*value = metrics->DclkFrequency;
break;
- case METRICS_AVERAGE_UCLK:
+ case METRICS_CURR_UCLK:
*value = metrics->MemclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
@@ -311,6 +326,103 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
+static int vangogh_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->Current.GfxclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCCLK:
+ *value = metrics->Current.SocclkFrequency;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->Current.VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->Current.DclkFrequency;
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->Current.MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->Current.GfxActivity;
+ break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = metrics->Current.UvdActivity;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+		*value = (metrics->Current.CurrentSocketPower << 8) / 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->Current.GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->Current.SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->Current.ThrottlerStatus;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Current.Voltage[2];
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Current.Voltage[1];
+ break;
+ case METRICS_AVERAGE_CPUCLK:
+ memcpy(value, &metrics->Current.CoreFrequency[0],
+ smu->cpu_core_num * sizeof(uint16_t));
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+		dev_err(adev->dev, "Failed to get SMU interface version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
+ else
+ ret = vangogh_get_smu_metrics_data(smu, member, value);
+
+ return ret;
+}
+
static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -447,11 +559,11 @@ static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_typ
return 0;
}
-static int vangogh_print_clk_levels(struct smu_context *smu,
+static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics;
+ SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
@@ -546,6 +658,126 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return size;
}
+static int vangogh_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+ }
+ break;
+ case SMU_OD_CCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_OD_RANGE:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_SOCCLK:
+		/* socclk levels 3 ~ 6 use the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.Current.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.Current.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.Current.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.Current.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int vangogh_common_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+		dev_err(adev->dev, "Failed to get SMU interface version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
+ else
+ ret = vangogh_print_clk_levels(smu, clk_type, buf);
+
+ return ret;
+}
+
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
enum amd_dpm_forced_level level,
uint32_t *vclk_mask,
@@ -860,7 +1092,6 @@ static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
return ret;
break;
case SMU_FCLK:
- case SMU_MCLK:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinFclkByFreq,
min, NULL);
@@ -948,7 +1179,6 @@ static int vangogh_force_clk_levels(struct smu_context *smu,
if (ret)
return ret;
break;
- case SMU_MCLK:
case SMU_FCLK:
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_min_level, &min_freq);
@@ -1035,7 +1265,6 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
SMU_SOCCLK,
SMU_VCLK,
SMU_DCLK,
- SMU_MCLK,
SMU_FCLK,
};
@@ -1064,7 +1293,6 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
enum smu_clk_type clk_type;
uint32_t feature;
} clk_feature_map[] = {
- {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
@@ -1196,7 +1424,6 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
- vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
@@ -1236,7 +1463,6 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
- vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1278,57 +1504,57 @@ static int vangogh_read_sensor(struct smu_context *smu,
mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXACTIVITY,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = vangogh_get_smu_metrics_data(smu,
- METRICS_AVERAGE_UCLK,
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = vangogh_get_smu_metrics_data(smu,
- METRICS_AVERAGE_GFXCLK,
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_CPU_CLK:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_CPUCLK,
(uint32_t *)data);
*size = smu->cpu_core_num * sizeof(uint16_t);
@@ -1402,13 +1628,13 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
+static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v2_1 *gpu_metrics =
(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
- SmuMetrics_t metrics;
+ SmuMetrics_legacy_t metrics;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu, &metrics, true);
@@ -1421,9 +1647,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_soc = metrics.SocTemperature;
memcpy(&gpu_metrics->temperature_core[0],
&metrics.CoreTemperature[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
- gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
gpu_metrics->average_gfx_activity = metrics.GfxActivity;
gpu_metrics->average_mm_activity = metrics.UvdActivity;
@@ -1434,7 +1659,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
@@ -1445,9 +1670,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
- gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
@@ -1458,6 +1682,88 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_1 *gpu_metrics =
+ (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_1);
+}
+
+static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+		dev_err(adev->dev, "Failed to get SMU interface version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+
+ return ret;
+}
+
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
@@ -1876,9 +2182,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_watermarks_table = vangogh_set_watermarks_table,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.interrupt_work = smu_v11_0_interrupt_work,
- .get_gpu_metrics = vangogh_get_gpu_metrics,
+ .get_gpu_metrics = vangogh_common_get_gpu_metrics,
.od_edit_dpm_table = vangogh_od_edit_dpm_table,
- .print_clk_levels = vangogh_print_clk_levels,
+ .print_clk_levels = vangogh_common_print_clk_levels,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
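
All of the new vangogh_common_* wrappers share one shape: query the SMU interface version once, then route to the legacy or current implementation. A stripped-down sketch of that dispatch pattern (every name here is illustrative):

#include <stdint.h>
#include <stdio.h>

static int handle_legacy(void)  { return 1; }
static int handle_current(void) { return 2; }

static int dispatch(uint32_t if_version)
{
	/* interface versions below 3 predate the split Current/Average metrics */
	return (if_version < 0x3) ? handle_legacy() : handle_current();
}

int main(void)
{
	printf("v2 -> %d, v3 -> %d\n", dispatch(2), dispatch(3)); /* 1, 2 */
	return 0;
}
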
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index e3232295f2bf..f43b4c623685 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1332,6 +1332,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.gfx_state_change_set = renoir_gfx_state_change_set,
.set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
.od_edit_dpm_table = renoir_od_edit_dpm_table,
+ .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 6cc4855c8a37..d60b8c5e8715 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -27,6 +27,7 @@
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
@@ -278,3 +279,125 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
return ret;
}
+
+static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
+ uint8_t clk_id,
+ uint8_t syspll_id,
+ uint32_t *clk_freq)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+ int ret, index;
+
+ input.clk_id = clk_id;
+ input.syspll_id = syspll_id;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
+
+int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_firmware_info_v3_3 *v_3_3;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu12!\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
+ break;
+ case 3:
+ case 4:
+ default:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_SOCCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.socclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL1_DCFCLK_ID,
+ (uint8_t)SMU12_SYSPLL1_ID,
+ &smu->smu_table.boot_values.dcefclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_VCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.vclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_DCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.dclk);
+
+ if ((smu->smu_table.boot_values.format_revision == 3) &&
+ (smu->smu_table.boot_values.content_revision >= 2))
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL3_0_FCLK_ID,
+ (uint8_t)SMU12_SYSPLL3_0_ID,
+ &smu->smu_table.boot_values.fclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_LCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.lclk);
+
+ return 0;
+}
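
smu_v12_0_atom_get_smu_clockinfo stores boot-up clocks in 10 kHz units, dividing the Hz value returned by the VBIOS by 10000 after the little-endian conversion. A trivial standalone sketch of the scaling (endianness handling elided):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t smu_clock_freq_hz = 480000000;        /* hypothetical 480 MHz readback */
	uint32_t clk_freq = smu_clock_freq_hz / 10000; /* -> 48000 in 10 kHz units */

	printf("%u Hz -> %u (10 kHz units)\n", smu_clock_freq_hz, clk_freq);
	return 0;
}
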
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index bca02a9fb489..dcbe3a72da09 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -78,6 +78,8 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+#define CLOCK_VALID (1U << 31)
+
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -405,6 +407,9 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
{
int ret = 0;
+ /* VBIOS pptable is the first choice */
+ smu->smu_table.boot_values.pp_table_id = 0;
+
ret = smu_v13_0_setup_pptable(smu);
if (ret)
return ret;
@@ -670,6 +675,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t display_levels;
uint32_t freq_values[3] = {0};
+ uint32_t min_clk, max_clk;
if (amdgpu_ras_intr_triggered())
return snprintf(buf, PAGE_SIZE, "unavailable\n");
@@ -697,12 +703,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
display_levels = clocks.num_levels;
+ min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ?
+ smu->gfx_actual_hard_min_freq & ~CLOCK_VALID :
+ single_dpm_table->dpm_levels[0].value;
+ max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ?
+ smu->gfx_actual_soft_max_freq & ~CLOCK_VALID :
+ single_dpm_table->dpm_levels[1].value;
+
+ freq_values[0] = min_clk;
+ freq_values[1] = max_clk;
+
/* fine-grained dpm has only 2 levels */
- if (now > single_dpm_table->dpm_levels[0].value &&
- now < single_dpm_table->dpm_levels[1].value) {
+ if (now > min_clk && now < max_clk) {
display_levels = clocks.num_levels + 1;
- freq_values[0] = single_dpm_table->dpm_levels[0].value;
- freq_values[2] = single_dpm_table->dpm_levels[1].value;
+ freq_values[2] = max_clk;
freq_values[1] = now;
}
@@ -712,12 +726,15 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
*/
if (display_levels == clocks.num_levels) {
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
+ size += sprintf(
+ buf + size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (clocks.num_levels == 1) ?
+ "*" :
(aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ freq_values[i], now) ?
+ "*" :
+ ""));
} else {
for (i = 0; i < display_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
@@ -1117,6 +1134,9 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
&& (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+ /* Reset user min/max gfx clock */
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
switch (level) {
@@ -1158,7 +1178,14 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
min_clk = max(min, dpm_context->dpm_tables.gfx_table.min);
max_clk = min(max, dpm_context->dpm_tables.gfx_table.max);
- return smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
+ min_clk, max_clk);
+
+ if (!ret) {
+ smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID;
+ smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID;
+ }
+ return ret;
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
@@ -1178,9 +1205,15 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnableDeterminism,
max, NULL);
- if (ret)
+ if (ret) {
dev_err(adev->dev,
"Failed to enable determinism at GFX clock %d MHz\n", max);
+ } else {
+ smu->gfx_actual_hard_min_freq =
+ min_clk | CLOCK_VALID;
+ smu->gfx_actual_soft_max_freq =
+ max | CLOCK_VALID;
+ }
}
}
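
CLOCK_VALID tags bit 31 of gfx_actual_hard_min_freq and gfx_actual_soft_max_freq so the print path can distinguish a user-forced limit from the DPM-table default, with the low bits carrying the frequency in MHz. A standalone sketch of the tagging:

#include <assert.h>
#include <stdint.h>

#define CLOCK_VALID (1U << 31)

int main(void)
{
	uint32_t stored = 800 | CLOCK_VALID;     /* user forced 800 MHz */

	assert(stored & CLOCK_VALID);            /* user value present */
	assert((stored & ~CLOCK_VALID) == 800);  /* strip tag to recover MHz */

	uint32_t unset = 0;                      /* no user override */
	assert(!(unset & CLOCK_VALID));          /* fall back to the DPM table */
	return 0;
}
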
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 30c9ac635105..0864083e7435 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -276,8 +276,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
void *table;
uint16_t version_major, version_minor;
- /* temporarily hardcode to use vbios pptable */
- smu->smu_table.boot_values.pp_table_id = 0;
if (amdgpu_smu_pptable_id >= 0) {
smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index d74b263c5f4e..64e9107d70f7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1403,7 +1403,8 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if (IS_DISPLAY_VER(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+ if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
+ color_plane == 0 && fb->width > 3840)
tile_width *= 4;
/*
* The main surface pitch must be padded to a multiple of four
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index cbcfb0c4c370..02a003fd48fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -96,7 +96,7 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(i915) < 10)
+ if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
return false;
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 986bbbe3b12f..957252b695d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -597,7 +597,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
return false;
/* Display WA #1105: skl,bxt,kbl,cfl,glk */
- if (IS_DISPLAY_VER(dev_priv, 9) &&
+ if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e477b6114a60..e5dadde422f7 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -803,8 +803,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
vma = intel_overlay_pin_fb(new_bo);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto out_pin_section;
+ }
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 1d561812fcad..8ada4f829cab 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1519,8 +1519,7 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
u32 psr_status;
mutex_lock(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled ||
- (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
+ if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
mutex_unlock(&intel_dp->psr.lock);
continue;
}
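
The simplified PSR guard is plain boolean algebra: !a || (a && b) reduces to !a || b. A quick exhaustive check of the equivalence:

#include <assert.h>

int main(void)
{
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			assert((!a || (a && b)) == (!a || b));
	return 0;
}
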
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 21cc40897ca8..ce6b664b10aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
+ if (is_swiotlb_active()) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3e248d3bd869..4f9c8d3021ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -70,6 +70,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
/**
* i915_gem_shrink - Shrink buffer object caches
+ * @ww: i915 gem ww acquire ctx, or NULL
* @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 01c1d1b36acd..ca9c9e27a43d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -948,11 +948,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,
/* below are all lri handlers */
vreg = &vgpu_vreg(s->vgpu, offset);
- if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
- gvt_vgpu_err("%s access to non-render register (%x)\n",
- cmd, offset);
- return -EBADRQC;
- }
if (is_cmd_update_pdps(offset, s) &&
cmd_pdp_mmio_update_handler(s, offset, index))
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 67a26923aa0e..9478c132d7b6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -587,12 +587,6 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
entry, index, false, 0, mm->vgpu);
}
-static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
- struct intel_gvt_gtt_entry *entry, unsigned long index)
-{
- _ppgtt_set_root_entry(mm, entry, index, true);
-}
-
static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index a66e7b2531d4..e7c2babcee8b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -117,7 +117,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
return true;
}
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
@@ -135,7 +135,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
gvt_vgpu_type_groups[i] = group;
}
- return true;
+ return 0;
unwind:
for (j = 0; j < i; j++) {
@@ -143,7 +143,7 @@ unwind:
kfree(group);
}
- return false;
+ return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
@@ -364,7 +364,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret == false) {
+ if (ret) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
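
Converting intel_gvt_init_vgpu_type_groups from bool to int lets the caller propagate a real errno value, which is what the gvt_err() format string above already expected. A minimal standalone sketch of the pattern:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int init_groups(int fail)
{
	if (fail)
		return -ENOMEM; /* was "return false", which hid the cause */
	return 0;
}

int main(void)
{
	int ret = init_groups(1);

	if (ret)
		printf("failed to init vgpu type groups: %d (%s)\n",
		       ret, strerror(-ret));
	return 0;
}
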
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9165971c3c47..bec9c3652188 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -350,6 +350,8 @@ static void __rq_arm_watchdog(struct i915_request *rq)
if (!ce->watchdog.timeout_us)
return;
+ i915_request_get(rq);
+
hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
wdg->timer.function = __rq_watchdog_expired;
hrtimer_start_range_ns(&wdg->timer,
@@ -357,7 +359,6 @@ static void __rq_arm_watchdog(struct i915_request *rq)
NSEC_PER_USEC),
NSEC_PER_MSEC,
HRTIMER_MODE_REL);
- i915_request_get(rq);
}
static void __rq_cancel_watchdog(struct i915_request *rq)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6a35a30dd281..cf897297656f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -188,10 +188,7 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
void adreno_set_llc_attributes(struct iommu_domain *iommu)
{
- struct io_pgtable_domain_attr pgtbl_cfg;
-
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
}
struct msm_gem_address_space *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index b81ae90b8449..e8b506a6685b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = !!swiotlb_nr_tbl();
+ need_swiotlb = is_swiotlb_active();
#endif
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 8b7a4f7b7576..42a8afa839cb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7948,8 +7948,6 @@ restart_ih:
DRM_ERROR("Illegal register access in command stream\n");
/* XXX check the bitfield order! */
me_id = (ring_id & 0x60) >> 5;
- pipe_id = (ring_id & 0x18) >> 3;
- queue_id = (ring_id & 0x7) >> 0;
switch (me_id) {
case 0:
/* This results in a full GPU reset, but all we need to do is soft
@@ -7971,8 +7969,6 @@ restart_ih:
DRM_ERROR("Illegal instruction in command stream\n");
/* XXX check the bitfield order! */
me_id = (ring_id & 0x60) >> 5;
- pipe_id = (ring_id & 0x18) >> 3;
- queue_id = (ring_id & 0x7) >> 0;
switch (me_id) {
case 0:
/* This results in a full GPU reset, but all we need to do is soft
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 88731b79c8f5..d0e94b10e4c0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4511,7 +4511,7 @@ static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
+ if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad DST register\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c9385cfd0fc1..f9120dc24682 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -832,10 +832,14 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
return &plane->base;
}
-static const u32 tegra_cursor_plane_formats[] = {
+static const u32 tegra_legacy_cursor_plane_formats[] = {
DRM_FORMAT_RGBA8888,
};
+static const u32 tegra_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
static int tegra_cursor_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -875,12 +879,24 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
plane);
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
- u32 value = CURSOR_CLIP_DISPLAY;
+ struct tegra_drm *tegra = plane->dev->dev_private;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u64 dma_mask = *dc->dev->dma_mask;
+#endif
+ unsigned int x, y;
+ u32 value = 0;
	/* no more bets */
if (!new_state->crtc || !new_state->fb)
return;
+ /*
+ * Legacy display supports hardware clipping of the cursor, but
+ * nvdisplay relies on software to clip the cursor to the screen.
+ */
+ if (!dc->soc->has_nvdisplay)
+ value |= CURSOR_CLIP_DISPLAY;
+
switch (new_state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
@@ -908,7 +924,7 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (tegra_plane_state->iova[0] >> 32) & 0x3;
+ value = (tegra_plane_state->iova[0] >> 32) & (dma_mask >> 32);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -920,15 +936,39 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
value &= ~CURSOR_DST_BLEND_MASK;
value &= ~CURSOR_SRC_BLEND_MASK;
- value |= CURSOR_MODE_NORMAL;
+
+ if (dc->soc->has_nvdisplay)
+ value &= ~CURSOR_COMPOSITION_MODE_XOR;
+ else
+ value |= CURSOR_MODE_NORMAL;
+
value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
value |= CURSOR_ALPHA;
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+ /* nvdisplay relies on software for clipping */
+ if (dc->soc->has_nvdisplay) {
+ struct drm_rect src;
+
+ x = new_state->dst.x1;
+ y = new_state->dst.y1;
+
+ drm_rect_fp_to_int(&src, &new_state->src);
+
+ value = (src.y1 & tegra->vmask) << 16 | (src.x1 & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR);
+
+ value = (drm_rect_height(&src) & tegra->vmask) << 16 |
+ (drm_rect_width(&src) & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR);
+ } else {
+ x = new_state->crtc_x;
+ y = new_state->crtc_y;
+ }
+
/* position the cursor */
- value = (new_state->crtc_y & 0x3fff) << 16 |
- (new_state->crtc_x & 0x3fff);
+ value = ((y & tegra->vmask) << 16) | (x & tegra->hmask);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
@@ -982,8 +1022,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
plane->index = 6;
plane->dc = dc;
- num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
- formats = tegra_cursor_plane_formats;
+ if (!dc->soc->has_nvdisplay) {
+ num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
+ formats = tegra_legacy_cursor_plane_formats;
+ } else {
+ num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+ formats = tegra_cursor_plane_formats;
+ }
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
@@ -2035,6 +2080,16 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
return false;
}
+static int tegra_dc_early_init(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->num_crtcs++;
+
+ return 0;
+}
+
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->host);
@@ -2046,6 +2101,12 @@ static int tegra_dc_init(struct host1x_client *client)
int err;
/*
+ * DC has been reset by now, so VBLANK syncpoint can be released
+ * for general use.
+ */
+ host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe);
+
+ /*
* XXX do not register DCs with no window groups because we cannot
* assign a primary plane to them, which in turn will cause KMS to
* crash.
@@ -2111,6 +2172,12 @@ static int tegra_dc_init(struct host1x_client *client)
if (dc->soc->pitch_align > tegra->pitch_align)
tegra->pitch_align = dc->soc->pitch_align;
+ /* track maximum resolution */
+ if (dc->soc->has_nvdisplay)
+ drm->mode_config.max_width = drm->mode_config.max_height = 16384;
+ else
+ drm->mode_config.max_width = drm->mode_config.max_height = 4096;
+
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -2141,7 +2208,7 @@ cleanup:
drm_plane_cleanup(primary);
host1x_client_iommu_detach(client);
- host1x_syncpt_free(dc->syncpt);
+ host1x_syncpt_put(dc->syncpt);
return err;
}
@@ -2166,7 +2233,17 @@ static int tegra_dc_exit(struct host1x_client *client)
}
host1x_client_iommu_detach(client);
- host1x_syncpt_free(dc->syncpt);
+ host1x_syncpt_put(dc->syncpt);
+
+ return 0;
+}
+
+static int tegra_dc_late_exit(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->num_crtcs--;
return 0;
}
@@ -2235,8 +2312,10 @@ put_rpm:
}
static const struct host1x_client_ops dc_client_ops = {
+ .early_init = tegra_dc_early_init,
.init = tegra_dc_init,
.exit = tegra_dc_exit,
+ .late_exit = tegra_dc_late_exit,
.suspend = tegra_dc_runtime_suspend,
.resume = tegra_dc_runtime_resume,
};
@@ -2246,6 +2325,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 8,
.has_powergate = false,
@@ -2265,6 +2345,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 8,
.has_powergate = false,
@@ -2284,6 +2365,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_sector_layout = false,
.has_legacy_blending = true,
.pitch_align = 64,
.has_powergate = true,
@@ -2303,6 +2385,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = true,
@@ -2322,6 +2405,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = true,
@@ -2375,6 +2459,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = false,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = false,
@@ -2423,6 +2508,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_sector_layout = true,
.has_legacy_blending = false,
.pitch_align = 64,
.has_powergate = false,
@@ -2532,9 +2618,16 @@ static int tegra_dc_couple(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
+ u64 dma_mask = dma_get_mask(pdev->dev.parent);
struct tegra_dc *dc;
int err;
+ err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
if (!dc)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 051d03dcb9b0..29f19c3c6149 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -52,6 +52,7 @@ struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
+ bool supports_sector_layout;
bool has_legacy_blending;
unsigned int pitch_align;
bool has_powergate;
@@ -511,6 +512,8 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_DISP_CURSOR_START_ADDR_HI 0x4ec
#define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1
+#define CURSOR_COMPOSITION_MODE_BLEND (0 << 25)
+#define CURSOR_COMPOSITION_MODE_XOR (1 << 25)
#define CURSOR_MODE_LEGACY (0 << 24)
#define CURSOR_MODE_NORMAL (1 << 24)
#define CURSOR_DST_BLEND_ZERO (0 << 16)
@@ -705,6 +708,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define PROTOCOL_MASK (0xf << 8)
#define PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR 0x442
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR 0x446
+
#define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702
#define OWNER_MASK (0xf << 0)
#define OWNER(x) (((x) & 0xf) << 0)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 90709c38c993..0c350b0daab4 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -174,7 +174,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_gem_object **refs;
- struct host1x_syncpt *sp;
+ struct host1x_syncpt *sp = NULL;
struct host1x_job *job;
unsigned int num_refs;
int err;
@@ -301,8 +301,8 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
}
- /* check whether syncpoint ID is valid */
- sp = host1x_syncpt_get(host1x, syncpt.id);
+ /* Syncpoint ref will be dropped on job release. */
+ sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
if (!sp) {
err = -ENOENT;
goto fail;
@@ -311,7 +311,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
job->is_addr_reg = context->client->ops->is_addr_reg;
job->is_valid_class = context->client->ops->is_valid_class;
job->syncpt_incrs = syncpt.incrs;
- job->syncpt_id = syncpt.id;
+ job->syncpt = sp;
job->timeout = 10000;
if (args->timeout && args->timeout < 10000)
@@ -383,7 +383,7 @@ static int tegra_syncpt_read(struct drm_device *drm, void *data,
struct drm_tegra_syncpt_read *args = data;
struct host1x_syncpt *sp;
- sp = host1x_syncpt_get(host, args->id);
+ sp = host1x_syncpt_get_by_id_noref(host, args->id);
if (!sp)
return -EINVAL;
@@ -398,7 +398,7 @@ static int tegra_syncpt_incr(struct drm_device *drm, void *data,
struct drm_tegra_syncpt_incr *args = data;
struct host1x_syncpt *sp;
- sp = host1x_syncpt_get(host1x, args->id);
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
@@ -412,7 +412,7 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
struct drm_tegra_syncpt_wait *args = data;
struct host1x_syncpt *sp;
- sp = host1x_syncpt_get(host1x, args->id);
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
if (!sp)
return -EINVAL;
@@ -1121,9 +1121,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
+ drm->mode_config.max_width = 0;
+ drm->mode_config.max_height = 0;
drm->mode_config.allow_fb_modifiers = true;
@@ -1142,6 +1141,14 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (err < 0)
goto fbdev;
+ /*
+ * Now that all display controllers have been initialized, the maximum
+ * supported resolution is known and the bitmask for horizontal and
+ * vertical bitfields can be computed.
+ */
+ tegra->hmask = drm->mode_config.max_width - 1;
+ tegra->vmask = drm->mode_config.max_height - 1;
+
if (tegra->use_explicit_iommu) {
u64 carveout_start, carveout_end, gem_start, gem_end;
u64 dma_mask = dma_get_mask(&dev->dev);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f38de08e0c95..87df251c1fcf 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -24,6 +24,9 @@
#include "hub.h"
#include "trace.h"
+/* XXX move to include/uapi/drm/drm_fourcc.h? */
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+
struct reset_control;
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -54,7 +57,9 @@ struct tegra_drm {
struct tegra_fbdev *fbdev;
#endif
+ unsigned int hmask, vmask;
unsigned int pitch_align;
+ unsigned int num_crtcs;
struct tegra_display_hub *hub;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 01939c57fc74..cae8b8cbe9dd 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -44,6 +44,15 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
+ if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
+ tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
+ else
+ tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_GPU;
+
+ modifier &= ~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT;
+ }
+
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
@@ -86,6 +95,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
break;
default:
+ DRM_DEBUG_KMS("unknown format modifier: %llx\n", modifier);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index fafb5724499b..c15fd99d6cb2 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -21,9 +21,15 @@ enum tegra_bo_tiling_mode {
TEGRA_BO_TILING_MODE_BLOCK,
};
+enum tegra_bo_sector_layout {
+ TEGRA_BO_SECTOR_LAYOUT_TEGRA,
+ TEGRA_BO_SECTOR_LAYOUT_GPU,
+};
+
struct tegra_bo_tiling {
enum tegra_bo_tiling_mode mode;
unsigned long value;
+ enum tegra_bo_sector_layout sector_layout;
};
struct tegra_bo {
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index adbe2ddcda19..de288cba3905 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -67,7 +67,7 @@ static int gr2d_init(struct host1x_client *client)
detach:
host1x_client_iommu_detach(client);
free:
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
put:
host1x_channel_put(gr2d->channel);
return err;
@@ -86,7 +86,7 @@ static int gr2d_exit(struct host1x_client *client)
return err;
host1x_client_iommu_detach(client);
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
return 0;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index b0b8154e8104..24442ade0da3 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -76,7 +76,7 @@ static int gr3d_init(struct host1x_client *client)
detach:
host1x_client_iommu_detach(client);
free:
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
put:
host1x_channel_put(gr3d->channel);
return err;
@@ -94,7 +94,7 @@ static int gr3d_exit(struct host1x_client *client)
return err;
host1x_client_iommu_detach(client);
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
return 0;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8e6d329d062b..79bff8b48271 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -55,6 +55,18 @@ static const u64 tegra_shared_plane_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ /*
+ * The GPU sector layout is only supported on Tegra194, but these will
+ * be filtered out later on by ->format_mod_supported() on SoCs where
+ * it isn't supported.
+ */
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ /* sentinel */
DRM_FORMAT_MOD_INVALID
};
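
A minimal sketch of how such a modifier decodes, mirroring the checks in tegra_fb_get_tiling() and tegra_plane_format_mod_supported(); the NVIDIA vendor value is taken from drm_fourcc.h and the bit-22 flag from this patch:

#include <stdbool.h>
#include <stdint.h>

#define VENDOR_NVIDIA     0x03ULL       /* DRM_FORMAT_MOD_VENDOR_NVIDIA */
#define SECTOR_LAYOUT_BIT (1ULL << 22)  /* DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT */

static bool uses_gpu_sector_layout(uint64_t modifier)
{
	/* only NVIDIA vendor modifiers carry the sector layout bit */
	if ((modifier >> 56) != VENDOR_NVIDIA)
		return false;

	return (modifier & SECTOR_LAYOUT_BIT) != 0;
}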
@@ -366,6 +378,12 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
+ !dc->soc->supports_sector_layout) {
+ DRM_ERROR("hardware doesn't support GPU sector layout\n");
+ return -EINVAL;
+ }
+
/*
* Tegra doesn't support different strides for U and V planes so we
* error out if the user tries to display a framebuffer with such a
@@ -485,6 +503,16 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
base = tegra_plane_state->iova[0] + fb->offsets[0];
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /*
+ * Physical address bit 39 in Tegra194 is used as a switch for special
+ * logic that swizzles the memory using either the legacy Tegra or the
+ * dGPU sector layout.
+ */
+ if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
+ base |= BIT(39);
+#endif
+
tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -562,9 +590,8 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
- /* planes can be assigned to arbitrary CRTCs */
- unsigned int possible_crtcs = 0x7;
struct tegra_shared_plane *plane;
+ unsigned int possible_crtcs;
unsigned int num_formats;
const u64 *modifiers;
struct drm_plane *p;
@@ -583,6 +610,9 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
p = &plane->base.base;
+ /* planes can be assigned to arbitrary CRTCs */
+ possible_crtcs = BIT(tegra->num_crtcs) - 1;
+
num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
formats = tegra_shared_plane_formats;
modifiers = tegra_shared_plane_modifiers;
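
The BIT(num_crtcs) - 1 expression replaces the hardcoded 0x7 with a mask sized to however many CRTCs tegra_dc_early_init() counted; a standalone sketch of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1UL << (n))

/* num_crtcs is incremented once per DC by tegra_dc_early_init() */
static uint32_t possible_crtcs_mask(unsigned int num_crtcs)
{
	return BIT(num_crtcs) - 1;
}

int main(void)
{
	assert(possible_crtcs_mask(3) == 0x7); /* the removed literal */
	assert(possible_crtcs_mask(4) == 0xf);
	return 0;
}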
@@ -848,12 +878,19 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
static int tegra_display_hub_probe(struct platform_device *pdev)
{
+ u64 dma_mask = dma_get_mask(pdev->dev.parent);
struct device_node *child = NULL;
struct tegra_display_hub *hub;
struct clk *clk;
unsigned int i;
int err;
+ err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 19e8847a164b..2e11b4b1f702 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -83,6 +83,22 @@ static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
kfree(state);
}
+static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
+{
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, plane->dev) {
+ if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!dc->soc->supports_sector_layout)
+ return false;
+ }
+ }
+
+ return true;
+}
+
static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -92,6 +108,14 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
+ /* check for the sector layout bit */
+ if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
+ if (!tegra_plane_supports_sector_layout(plane))
+ return false;
+ }
+ }
+
if (info->num_planes == 1)
return true;
@@ -119,6 +143,14 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
+ /*
+ * If we're not attached to a domain, we already stored the
+ * physical address when the buffer was allocated. If we're
+ * part of a group that's shared between all display
+ * controllers, we've also already mapped the framebuffer
+ * through the SMMU. In both cases we can short-circuit the
+ * code below and retrieve the stored IOV address.
+ */
if (!domain || dc->client.group)
phys = &phys_addr;
else
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 77e128832920..72aea1cc0cfa 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -214,7 +214,7 @@ static int vic_init(struct host1x_client *client)
return 0;
free_syncpt:
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
detach:
@@ -238,7 +238,7 @@ static int vic_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_syncpt_free(client->syncpts[0]);
+ host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 9b787b3caeb5..510e3e001dab 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -112,7 +112,7 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
struct ttm_global *glob = &ttm_glob;
struct ttm_device *bdev;
- int ret = -EBUSY;
+ int ret = 0;
mutex_lock(&ttm_global_mutex);
list_for_each_entry(bdev, &glob->device_list, device_list) {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index eecc930e97ab..a1a25410ec74 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -329,6 +329,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
ttm_dma32_pages_limit) {
ret = ttm_global_swapout(ctx, GFP_KERNEL);
+ if (ret == 0)
+ break;
if (ret < 0)
goto error;
}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
index 104b95a8c7a2..aeb0a22a2c34 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
@@ -280,7 +280,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
spin_unlock(&glob->lock);
ret = ttm_global_swapout(ctx, GFP_KERNEL);
spin_lock(&glob->lock);
- if (unlikely(ret < 0))
+ if (unlikely(ret <= 0))
break;
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 68a766ff0e9d..46f69c532b6b 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -197,6 +197,17 @@ int host1x_device_init(struct host1x_device *device)
mutex_lock(&device->clients_lock);
list_for_each_entry(client, &device->clients, list) {
+ if (client->ops && client->ops->early_init) {
+ err = client->ops->early_init(client);
+ if (err < 0) {
+ dev_err(&device->dev, "failed to early initialize %s: %d\n",
+ dev_name(client->dev), err);
+ goto teardown_late;
+ }
+ }
+ }
+
+ list_for_each_entry(client, &device->clients, list) {
if (client->ops && client->ops->init) {
err = client->ops->init(client);
if (err < 0) {
@@ -217,6 +228,14 @@ teardown:
if (client->ops->exit)
client->ops->exit(client);
+ /* reset client to end of list for late teardown */
+ client = list_entry(&device->clients, struct host1x_client, list);
+
+teardown_late:
+ list_for_each_entry_continue_reverse(client, &device->clients, list)
+ if (client->ops->late_exit)
+ client->ops->late_exit(client);
+
mutex_unlock(&device->clients_lock);
return err;
}
@@ -251,6 +270,18 @@ int host1x_device_exit(struct host1x_device *device)
}
}
+ list_for_each_entry_reverse(client, &device->clients, list) {
+ if (client->ops && client->ops->late_exit) {
+ err = client->ops->late_exit(client);
+ if (err < 0) {
+ dev_err(&device->dev, "failed to late cleanup %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&device->clients_lock);
+ return err;
+ }
+ }
+ }
+
mutex_unlock(&device->clients_lock);
return 0;
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index e8d3fda91d8a..6e6ca774f68d 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -273,15 +273,13 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
static void cdma_start_timer_locked(struct host1x_cdma *cdma,
struct host1x_job *job)
{
- struct host1x *host = cdma_to_host1x(cdma);
-
if (cdma->timeout.client) {
/* timer already started */
return;
}
cdma->timeout.client = job->client;
- cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+ cdma->timeout.syncpt = job->syncpt;
cdma->timeout.syncpt_val = job->syncpt_end;
cdma->timeout.start_ktime = ktime_get();
@@ -312,7 +310,6 @@ static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
static void update_cdma_locked(struct host1x_cdma *cdma)
{
bool signal = false;
- struct host1x *host1x = cdma_to_host1x(cdma);
struct host1x_job *job, *n;
/* If CDMA is stopped, queue is cleared and we can return */
@@ -324,8 +321,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
* to consume as many sync queue entries as possible without blocking
*/
list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
- struct host1x_syncpt *sp =
- host1x_syncpt_get(host1x, job->syncpt_id);
+ struct host1x_syncpt *sp = job->syncpt;
/* Check whether this syncpt has completed, and bail if not */
if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
@@ -499,8 +495,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
if (!cdma->timeout.initialized) {
int err;
- err = host1x_hw_cdma_timeout_init(host1x, cdma,
- job->syncpt_id);
+ err = host1x_hw_cdma_timeout_init(host1x, cdma);
if (err) {
mutex_unlock(&cdma->lock);
return err;
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 1b4997bda1c7..8a14880c61bb 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -69,6 +69,7 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
static void show_syncpts(struct host1x *m, struct output *o)
{
+ struct list_head *pos;
unsigned int i;
host1x_debug_output(o, "---- syncpts ----\n");
@@ -76,12 +77,19 @@ static void show_syncpts(struct host1x *m, struct output *o)
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
+ unsigned int waiters = 0;
- if (!min && !max)
+ spin_lock(&m->syncpt[i].intr.lock);
+ list_for_each(pos, &m->syncpt[i].intr.wait_head)
+ waiters++;
+ spin_unlock(&m->syncpt[i].intr.lock);
+
+ if (!min && !max && !waiters)
continue;
- host1x_debug_output(o, "id %u (%s) min %d max %d\n",
- i, m->syncpt[i].name, min, max);
+ host1x_debug_output(o,
+ "id %u (%s) min %d max %d (%d waiters)\n",
+ i, m->syncpt[i].name, min, max, waiters);
}
for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index d0ebb70e2fdd..fbb6447b8659 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -77,6 +77,7 @@ static const struct host1x_info host1x01_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x02_info = {
@@ -91,6 +92,7 @@ static const struct host1x_info host1x02_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x04_info = {
@@ -105,6 +107,7 @@ static const struct host1x_info host1x04_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = false,
};
static const struct host1x_info host1x05_info = {
@@ -119,6 +122,7 @@ static const struct host1x_info host1x05_info = {
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
+ .reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -142,6 +146,7 @@ static const struct host1x_info host1x06_info = {
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
+ .reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -165,6 +170,7 @@ static const struct host1x_info host1x07_info = {
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
+ .reserve_vblank_syncpts = false,
};
static const struct of_device_id host1x_of_match[] = {
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index f781a9b0f39d..fa6d4bc46e98 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -37,7 +37,7 @@ struct host1x_cdma_ops {
void (*start)(struct host1x_cdma *cdma);
void (*stop)(struct host1x_cdma *cdma);
void (*flush)(struct host1x_cdma *cdma);
- int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
+ int (*timeout_init)(struct host1x_cdma *cdma);
void (*timeout_destroy)(struct host1x_cdma *cdma);
void (*freeze)(struct host1x_cdma *cdma);
void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -101,6 +101,12 @@ struct host1x_info {
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
+ /*
+ * On T20-T148, the boot chain may set up the DC to increment syncpoints
+ * 26/27 on VBLANK. As such, we cannot use these syncpoints until
+ * the display driver disables VBLANK increments.
+ */
+ bool reserve_vblank_syncpts;
};
struct host1x {
@@ -261,10 +267,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
}
static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
- struct host1x_cdma *cdma,
- unsigned int syncpt)
+ struct host1x_cdma *cdma)
{
- return host->cdma_op->timeout_init(cdma, syncpt);
+ return host->cdma_op->timeout_init(cdma);
}
static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2f3bf94cf365..e49cd5b8f735 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -295,7 +295,7 @@ static void cdma_timeout_handler(struct work_struct *work)
/*
* Init timeout resources
*/
-static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
+static int cdma_timeout_init(struct host1x_cdma *cdma)
{
INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
cdma->timeout.initialized = true;
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 5eaa29d171c9..d4c28faf27d1 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -86,8 +86,7 @@ static void submit_gathers(struct host1x_job *job)
static inline void synchronize_syncpt_base(struct host1x_job *job)
{
- struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
- struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+ struct host1x_syncpt *sp = job->syncpt;
unsigned int id;
u32 value;
@@ -118,7 +117,7 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel)
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
- struct host1x_syncpt *sp;
+ struct host1x_syncpt *sp = job->syncpt;
u32 user_syncpt_incrs = job->syncpt_incrs;
u32 prev_max = 0;
u32 syncval;
@@ -126,10 +125,9 @@ static int channel_submit(struct host1x_job *job)
struct host1x_waitlist *completed_waiter = NULL;
struct host1x *host = dev_get_drvdata(ch->dev->parent);
- sp = host->syncpt + job->syncpt_id;
trace_host1x_channel_submit(dev_name(ch->dev),
job->num_gathers, job->num_relocs,
- job->syncpt_id, job->syncpt_incrs);
+ job->syncpt->id, job->syncpt_incrs);
/* before error checks, return current max */
prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
@@ -163,7 +161,7 @@ static int channel_submit(struct host1x_job *job)
host1x_cdma_push(&ch->cdma,
host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
host1x_uclass_wait_syncpt_r(), 1),
- host1x_class_host_wait_syncpt(job->syncpt_id,
+ host1x_class_host_wait_syncpt(job->syncpt->id,
host1x_syncpt_read_max(sp)));
}
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index f31bcfa1b837..ceb48229d14b 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -204,7 +204,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
unsigned int i;
host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
- job, job->syncpt_id, job->syncpt_end,
+ job, job->syncpt->id, job->syncpt_end,
job->first_get, job->timeout,
job->num_slots, job->num_unpins);
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
index 3058b3c9a91d..b766851d5b83 100644
--- a/drivers/gpu/host1x/hw/hw_host1x07_vm.h
+++ b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
@@ -29,6 +29,6 @@
#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4 * (x))
#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4 * (x))
#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4 * (x))
-#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x9980 + 4 * (x))
#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0xa604 + 4 * (x))
#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 9245add23b5d..6d1f3c0fdbe7 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -235,25 +235,37 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
}
- spin_unlock(&syncpt->intr.lock);
-
if (ref)
*ref = waiter;
+
+ spin_unlock(&syncpt->intr.lock);
+
return 0;
}
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+ bool flush)
{
struct host1x_waitlist *waiter = ref;
struct host1x_syncpt *syncpt;
- while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
- WLS_REMOVED)
- schedule();
+ atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);
syncpt = host->syncpt + id;
- (void)process_wait_list(host, syncpt,
- host1x_syncpt_load(host->syncpt + id));
+
+ spin_lock(&syncpt->intr.lock);
+ if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
+ WLS_CANCELLED) {
+ list_del(&waiter->list);
+ kref_put(&waiter->refcount, waiter_release);
+ }
+ spin_unlock(&syncpt->intr.lock);
+
+ if (flush) {
+ /* Wait until any concurrently executing handler has finished. */
+ while (atomic_read(&waiter->state) != WLS_HANDLED)
+ schedule();
+ }
kref_put(&waiter->refcount, waiter_release);
}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index aac38194398f..6ea55e615e3a 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -74,8 +74,10 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
* Unreference an action submitted to host1x_intr_add_action().
* You must call this if you passed non-NULL as ref.
* @ref the ref returned from host1x_intr_add_action()
+ * @flush wait until any pending handlers have completed before returning.
*/
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+ bool flush);
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 82d0a60ba3f7..adbdc225de8d 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -79,6 +79,9 @@ static void job_free(struct kref *ref)
{
struct host1x_job *job = container_of(ref, struct host1x_job, ref);
+ if (job->syncpt)
+ host1x_syncpt_put(job->syncpt);
+
kfree(job);
}
@@ -674,7 +677,7 @@ EXPORT_SYMBOL(host1x_job_unpin);
*/
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
- dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id);
+ dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt->id);
dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end);
dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get);
dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index fce7892d5137..e648ebbb2027 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -42,17 +42,32 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
base->requested = false;
}
-static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
- struct host1x_client *client,
- unsigned long flags)
+/**
+ * host1x_syncpt_alloc() - allocate a syncpoint
+ * @host: host1x device data
+ * @flags: bitfield of HOST1X_SYNCPT_* flags
+ * @name: name for the syncpoint for use in debug prints
+ *
+ * Allocates a hardware syncpoint for the caller's use. The caller then has
+ * the sole authority to mutate the syncpoint's value until it is freed again.
+ *
+ * If no free syncpoints are available, or a NULL name was specified, returns
+ * NULL.
+ */
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ unsigned long flags,
+ const char *name)
{
struct host1x_syncpt *sp = host->syncpt;
+ char *full_name;
unsigned int i;
- char *name;
+
+ if (!name)
+ return NULL;
mutex_lock(&host->syncpt_mutex);
- for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+ for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
;
if (i >= host->info->nb_pts)
@@ -64,19 +79,19 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
goto unlock;
}
- name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
- client ? dev_name(client->dev) : NULL);
- if (!name)
+ full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
+ if (!full_name)
goto free_base;
- sp->client = client;
- sp->name = name;
+ sp->name = full_name;
if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
sp->client_managed = true;
else
sp->client_managed = false;
+ kref_init(&sp->ref);
+
mutex_unlock(&host->syncpt_mutex);
return sp;
@@ -87,6 +102,7 @@ unlock:
mutex_unlock(&host->syncpt_mutex);
return NULL;
}
+EXPORT_SYMBOL(host1x_syncpt_alloc);
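
A hedged usage sketch for the newly exported allocator (kernel context assumed; the engine name is hypothetical, and the declaration is assumed to live in <linux/host1x.h>):

#include <linux/host1x.h>

static int example_syncpt_user(struct host1x *host)
{
	struct host1x_syncpt *sp;

	/* "example-engine" is a made-up name used for debug prints */
	sp = host1x_syncpt_alloc(host, HOST1X_SYNCPT_CLIENT_MANAGED,
				 "example-engine");
	if (!sp)
		return -ENOMEM; /* no free syncpoints, or name was NULL */

	/* ... point hardware at sp and wait for increments ... */

	host1x_syncpt_put(sp); /* syncpt_release() runs on the last put */
	return 0;
}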
/**
* host1x_syncpt_id() - retrieve syncpoint ID
@@ -294,7 +310,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
}
}
- host1x_intr_put_ref(sp->host, sp->id, ref);
+ host1x_intr_put_ref(sp->host, sp->id, ref, true);
done:
return err;
@@ -307,59 +323,12 @@ EXPORT_SYMBOL(host1x_syncpt_wait);
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
u32 current_val;
- u32 future_val;
smp_rmb();
current_val = (u32)atomic_read(&sp->min_val);
- future_val = (u32)atomic_read(&sp->max_val);
-
- /* Note the use of unsigned arithmetic here (mod 1<<32).
- *
- * c = current_val = min_val = the current value of the syncpoint.
- * t = thresh = the value we are checking
- * f = future_val = max_val = the value c will reach when all
- * outstanding increments have completed.
- *
- * Note that c always chases f until it reaches f.
- *
- * Dtf = (f - t)
- * Dtc = (c - t)
- *
- * Consider all cases:
- *
- * A) .....c..t..f..... Dtf < Dtc need to wait
- * B) .....c.....f..t.. Dtf > Dtc expired
- * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large)
- *
- * Any case where f==c: always expired (for any t). Dtf == Dcf
- * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0)
- * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0,
- * Dtc!=0)
- *
- * Other cases:
- *
- * A) .....t..f..c..... Dtf < Dtc need to wait
- * A) .....f..c..t..... Dtf < Dtc need to wait
- * A) .....f..t..c..... Dtf > Dtc expired
- *
- * So:
- * Dtf >= Dtc implies EXPIRED (return true)
- * Dtf < Dtc implies WAIT (return false)
- *
- * Note: If t is expired then we *cannot* wait on it. We would wait
- * forever (hang the system).
- *
- * Note: do NOT get clever and remove the -thresh from both sides. It
- * is NOT the same.
- *
- * If future valueis zero, we have a client managed sync point. In that
- * case we do a direct comparison.
- */
- if (!host1x_syncpt_client_managed(sp))
- return future_val - thresh >= current_val - thresh;
- else
- return (s32)(current_val - thresh) >= 0;
+
+ return ((current_val - thresh) & 0x80000000U) == 0U;
}
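
The new test inspects the sign bit of the mod-2^32 difference, i.e. it is the (s32)(current_val - thresh) >= 0 comparison from the removed comment, now applied uniformly instead of only to client-managed syncpoints; a standalone illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool expired(uint32_t current_val, uint32_t thresh)
{
	/* sign bit clear => current_val has reached thresh (mod 2^32) */
	return ((current_val - thresh) & 0x80000000U) == 0U;
}

int main(void)
{
	assert(expired(5, 5));           /* t == c: expired */
	assert(expired(6, 5));           /* c past t: expired */
	assert(!expired(5, 6));          /* c behind t: wait */
	assert(expired(2, 0xfffffffeU)); /* wraparound handled */
	return 0;
}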
int host1x_syncpt_init(struct host1x *host)
@@ -401,10 +370,15 @@ int host1x_syncpt_init(struct host1x *host)
host1x_hw_syncpt_enable_protection(host);
/* Allocate sync point to use for clearing waits for expired fences */
- host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
+ host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
if (!host->nop_sp)
return -ENOMEM;
+ if (host->info->reserve_vblank_syncpts) {
+ kref_init(&host->syncpt[26].ref);
+ kref_init(&host->syncpt[27].ref);
+ }
+
return 0;
}
@@ -416,44 +390,50 @@ int host1x_syncpt_init(struct host1x *host)
* host1x client drivers can use this function to allocate a syncpoint for
* subsequent use. A syncpoint returned by this function will be reserved for
* use by the client exclusively. When no longer using a syncpoint, a host1x
- * client driver needs to release it using host1x_syncpt_free().
+ * client driver needs to release it using host1x_syncpt_put().
*/
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
struct host1x *host = dev_get_drvdata(client->host->parent);
- return host1x_syncpt_alloc(host, client, flags);
+ return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
}
EXPORT_SYMBOL(host1x_syncpt_request);
-/**
- * host1x_syncpt_free() - free a requested syncpoint
- * @sp: host1x syncpoint
- *
- * Release a syncpoint previously allocated using host1x_syncpt_request(). A
- * host1x client driver should call this when the syncpoint is no longer in
- * use. Note that client drivers must ensure that the syncpoint doesn't remain
- * under the control of hardware after calling this function, otherwise two
- * clients may end up trying to access the same syncpoint concurrently.
- */
-void host1x_syncpt_free(struct host1x_syncpt *sp)
+static void syncpt_release(struct kref *ref)
{
- if (!sp)
- return;
+ struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);
+
+ atomic_set(&sp->max_val, host1x_syncpt_read(sp));
mutex_lock(&sp->host->syncpt_mutex);
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
- sp->client = NULL;
sp->name = NULL;
sp->client_managed = false;
mutex_unlock(&sp->host->syncpt_mutex);
}
-EXPORT_SYMBOL(host1x_syncpt_free);
+
+/**
+ * host1x_syncpt_put() - free a requested syncpoint
+ * @sp: host1x syncpoint
+ *
+ * Release a syncpoint previously allocated using host1x_syncpt_request(). A
+ * host1x client driver should call this when the syncpoint is no longer in
+ * use.
+ */
+void host1x_syncpt_put(struct host1x_syncpt *sp)
+{
+ if (!sp)
+ return;
+
+ kref_put(&sp->ref, syncpt_release);
+}
+EXPORT_SYMBOL(host1x_syncpt_put);
void host1x_syncpt_deinit(struct host1x *host)
{
@@ -520,16 +500,48 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
}
/**
- * host1x_syncpt_get() - obtain a syncpoint by ID
+ * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
* @host: host1x controller
* @id: syncpoint ID
*/
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
+ unsigned int id)
{
if (id >= host->info->nb_pts)
return NULL;
- return host->syncpt + id;
+ if (kref_get_unless_zero(&host->syncpt[id].ref))
+ return &host->syncpt[id];
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id);
+
+/**
+ * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
+ * increase the refcount.
+ * @host: host1x controller
+ * @id: syncpoint ID
+ */
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
+ unsigned int id)
+{
+ if (id >= host->info->nb_pts)
+ return NULL;
+
+ return &host->syncpt[id];
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);
+
+/**
+ * host1x_syncpt_get() - increment syncpoint refcount
+ * @sp: syncpoint
+ */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
+{
+ kref_get(&sp->ref);
+
+ return sp;
}
EXPORT_SYMBOL(host1x_syncpt_get);
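
A fragment sketching the reference flow that the tegra_drm_submit() hunk above now relies on; the reference taken here is dropped by job_free() in job.c:

/* take a reference that lives as long as the job (fragment) */
sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
if (!sp) {
	err = -ENOENT; /* ID out of range or syncpoint not allocated */
	goto fail;
}

job->syncpt = sp; /* host1x_syncpt_put() is called from job_free() */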
@@ -552,3 +564,31 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);
+
+static void do_nothing(struct kref *ref)
+{
+}
+
+/**
+ * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
+ * available for allocation
+ *
+ * @client: host1x bus client
+ * @syncpt_id: syncpoint ID to make available
+ *
+ * Makes the VBLANK<i> syncpoint available for allocation if it was
+ * reserved at initialization time. This should be called by the display
+ * driver after it has ensured that any VBLANK increment programming configured
+ * by the boot chain has been disabled.
+ */
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+ u32 syncpt_id)
+{
+ struct host1x *host = dev_get_drvdata(client->host->parent);
+
+ if (!host->info->reserve_vblank_syncpts)
+ return;
+
+ kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
+}
+EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
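
Usage matches the tegra_dc_init() hunk earlier in this patch, where DC heads map to VBLANK syncpoints 26 and 27; on host1x04 and later, which do not reserve them, the call is a no-op:

/*
 * From tegra_dc_init(): the DC has been reset, so any boot-chain
 * VBLANK increments are gone and the syncpoint can be handed back
 * to the allocator.
 */
host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe);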
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 8e1d04dacaa0..a6766f8d55ee 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -11,6 +11,7 @@
#include <linux/atomic.h>
#include <linux/host1x.h>
#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/sched.h>
#include "intr.h"
@@ -26,6 +27,8 @@ struct host1x_syncpt_base {
};
struct host1x_syncpt {
+ struct kref ref;
+
unsigned int id;
atomic_t min_val;
atomic_t max_val;
@@ -33,7 +36,6 @@ struct host1x_syncpt {
const char *name;
bool client_managed;
struct host1x *host;
- struct host1x_client *client;
struct host1x_syncpt_base *base;
/* interrupt data */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 786b71ef7738..4bf263c2d61a 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -351,6 +351,17 @@ config HID_EZKEY
help
Support for Ezkey BTC 8193 keyboard.
+config HID_FT260
+ tristate "FTDI FT260 USB HID to I2C host support"
+ depends on USB_HID && HIDRAW && I2C
+ help
+ Provides I2C host adapter functionality over USB-HID through the
+ FT260 device. The customizable USB descriptor fields are exposed as sysfs
+ attributes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-ft260.
+
config HID_GEMBIRD
tristate "Gembird Joypad"
depends on HID
@@ -1042,10 +1053,11 @@ config HID_THINGM
config HID_THRUSTMASTER
tristate "ThrustMaster devices support"
- depends on HID
+ depends on USB_HID
help
- Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
- a THRUSTMASTER Ferrari GT Rumble Wheel.
+ Say Y here if you have a THRUSTMASTER FireStore Dual Power 2,
+ a THRUSTMASTER Ferrari GT Rumble Wheel or Thrustmaster FFB
+ Wheel (T150RS, T300RS, T300 Ferrari Alcantara Edition, T500RS).
config THRUSTMASTER_FF
bool "ThrustMaster devices force feedback support"
@@ -1206,4 +1218,6 @@ source "drivers/hid/intel-ish-hid/Kconfig"
source "drivers/hid/amd-sfh-hid/Kconfig"
+source "drivers/hid/surface-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index c4f6d5c613dc..193431ec4db8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_HID_ELAN) += hid-elan.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_ELO) += hid-elo.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
+obj-$(CONFIG_HID_FT260) += hid-ft260.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
@@ -112,7 +113,8 @@ obj-$(CONFIG_HID_STEAM) += hid-steam.o
obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
-obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
+obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o hid-thrustmaster.o
+obj-$(CONFIG_HID_TMINIT) += hid-tminit.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
@@ -145,3 +147,5 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
+
+obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 6b665931147d..2b986d0dbde4 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -74,7 +74,7 @@ enum dev_num {
UNKNOWN,
};
/**
- * struct u1_data
+ * struct alps_dev
*
* @input: pointer to the kernel input device
* @input2: pointer to the kernel input2 device
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 097cb1ee3126..0ae9f6df59d1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2129,7 +2129,7 @@ struct hid_dynid {
};
/**
- * store_new_id - add a new HID device ID to this driver and re-probe devices
+ * new_id_store - add a new HID device ID to this driver and re-probe devices
* @drv: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index d7eaf9100370..59f8d716d78f 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -417,6 +417,7 @@ static const struct hid_usage_entry hid_usage_table[] = {
{ 0x85, 0x44, "Charging" },
{ 0x85, 0x45, "Discharging" },
{ 0x85, 0x4b, "NeedReplacement" },
+ { 0x85, 0x65, "AbsoluteStateOfCharge" },
{ 0x85, 0x66, "RemainingCapacity" },
{ 0x85, 0x68, "RunTimeToEmpty" },
{ 0x85, 0x6a, "AverageTimeToFull" },
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index dae193749d44..021049805bb7 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -410,15 +410,6 @@ static int elan_start_multitouch(struct hid_device *hdev)
return 0;
}
-static enum led_brightness elan_mute_led_get_brigtness(struct led_classdev *led_cdev)
-{
- struct device *dev = led_cdev->dev->parent;
- struct hid_device *hdev = to_hid_device(dev);
- struct elan_drvdata *drvdata = hid_get_drvdata(hdev);
-
- return drvdata->mute_led_state;
-}
-
static int elan_mute_led_set_brigtness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -445,8 +436,9 @@ static int elan_mute_led_set_brigtness(struct led_classdev *led_cdev,
kfree(dmabuf);
if (ret != ELAN_LED_REPORT_SIZE) {
- hid_err(hdev, "Failed to set mute led brightness: %d\n", ret);
- return ret;
+ if (ret != -ENODEV)
+ hid_err(hdev, "Failed to set mute led brightness: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
}
drvdata->mute_led_state = led_state;
@@ -459,9 +451,10 @@ static int elan_init_mute_led(struct hid_device *hdev)
struct led_classdev *mute_led = &drvdata->mute_led;
mute_led->name = "elan:red:mute";
- mute_led->brightness_get = elan_mute_led_get_brigtness;
+ mute_led->default_trigger = "audio-mute";
mute_led->brightness_set_blocking = elan_mute_led_set_brigtness;
mute_led->max_brightness = LED_ON;
+ mute_led->flags = LED_HW_PLUGGABLE;
mute_led->dev = &hdev->dev;
return devm_led_classdev_register(&hdev->dev, mute_led);
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
new file mode 100644
index 000000000000..a5751607ce24
--- /dev/null
+++ b/drivers/hid/hid-ft260.c
@@ -0,0 +1,1054 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * hid-ft260.c - FTDI FT260 USB HID to I2C host bridge
+ *
+ * Copyright (c) 2021, Michael Zaidman <michaelz@xsightlabs.com>
+ *
+ * Data Sheet:
+ * https://www.ftdichip.com/Support/Documents/DataSheets/ICs/DS_FT260.pdf
+ */
+
+#include "hid-ids.h"
+#include <linux/hidraw.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#ifdef DEBUG
+static int ft260_debug = 1;
+#else
+static int ft260_debug;
+#endif
+module_param_named(debug, ft260_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Toggle FT260 debugging messages");
+
+#define ft260_dbg(format, arg...) \
+ do { \
+ if (ft260_debug) \
+ pr_info("%s: " format, __func__, ##arg); \
+ } while (0)
+
+#define FT260_REPORT_MAX_LENGTH (64)
+#define FT260_I2C_DATA_REPORT_ID(len) (FT260_I2C_REPORT_MIN + (len - 1) / 4)
+/*
+ * The input report format assigns 62 bytes for the data payload, but the
+ * FT260 returns them as 60 and 2 bytes in two separate transactions. To
+ * minimize transfer time when reading in chunks, cap the maximum read
+ * payload length at 60 bytes.
+ */
+#define FT260_RD_DATA_MAX (60)
+#define FT260_WR_DATA_MAX (60)
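
A worked example of the report-ID arithmetic, using the FT260_I2C_REPORT_MIN (0xD0) and FT260_I2C_REPORT_MAX (0xDE) values from the enum below: payload lengths select report IDs in 4-byte steps.

#include <assert.h>

#define REPORT_MIN 0xD0 /* FT260_I2C_REPORT_MIN */
#define DATA_REPORT_ID(len) (REPORT_MIN + ((len) - 1) / 4)

int main(void)
{
	assert(DATA_REPORT_ID(1)  == 0xD0);
	assert(DATA_REPORT_ID(4)  == 0xD0);
	assert(DATA_REPORT_ID(5)  == 0xD1);
	assert(DATA_REPORT_ID(60) == 0xDE); /* FT260_RD/WR_DATA_MAX */
	return 0;
}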
+
+/*
+ * Device interface configuration.
+ * The FT260 has 2 interfaces that are controlled by DCNF0 and DCNF1 pins.
+ * The first implements the USB HID to I2C bridge function and the
+ * second the USB HID to UART bridge function.
+ */
+enum {
+ FT260_MODE_ALL = 0x00,
+ FT260_MODE_I2C = 0x01,
+ FT260_MODE_UART = 0x02,
+ FT260_MODE_BOTH = 0x03,
+};
+
+/* Control pipe */
+enum {
+ FT260_GET_RQST_TYPE = 0xA1,
+ FT260_GET_REPORT = 0x01,
+ FT260_SET_RQST_TYPE = 0x21,
+ FT260_SET_REPORT = 0x09,
+ FT260_FEATURE = 0x03,
+};
+
+/* Report IDs / Feature In */
+enum {
+ FT260_CHIP_VERSION = 0xA0,
+ FT260_SYSTEM_SETTINGS = 0xA1,
+ FT260_I2C_STATUS = 0xC0,
+ FT260_I2C_READ_REQ = 0xC2,
+ FT260_I2C_REPORT_MIN = 0xD0,
+ FT260_I2C_REPORT_MAX = 0xDE,
+ FT260_GPIO = 0xB0,
+ FT260_UART_INTERRUPT_STATUS = 0xB1,
+ FT260_UART_STATUS = 0xE0,
+ FT260_UART_RI_DCD_STATUS = 0xE1,
+ FT260_UART_REPORT = 0xF0,
+};
+
+/* Feature Out */
+enum {
+ FT260_SET_CLOCK = 0x01,
+ FT260_SET_I2C_MODE = 0x02,
+ FT260_SET_UART_MODE = 0x03,
+ FT260_ENABLE_INTERRUPT = 0x05,
+ FT260_SELECT_GPIO2_FUNC = 0x06,
+ FT260_ENABLE_UART_DCD_RI = 0x07,
+ FT260_SELECT_GPIOA_FUNC = 0x08,
+ FT260_SELECT_GPIOG_FUNC = 0x09,
+ FT260_SET_INTERRUPT_TRIGGER = 0x0A,
+ FT260_SET_SUSPEND_OUT_POLAR = 0x0B,
+ FT260_ENABLE_UART_RI_WAKEUP = 0x0C,
+ FT260_SET_UART_RI_WAKEUP_CFG = 0x0D,
+ FT260_SET_I2C_RESET = 0x20,
+ FT260_SET_I2C_CLOCK_SPEED = 0x22,
+ FT260_SET_UART_RESET = 0x40,
+ FT260_SET_UART_CONFIG = 0x41,
+ FT260_SET_UART_BAUD_RATE = 0x42,
+ FT260_SET_UART_DATA_BIT = 0x43,
+ FT260_SET_UART_PARITY = 0x44,
+ FT260_SET_UART_STOP_BIT = 0x45,
+ FT260_SET_UART_BREAKING = 0x46,
+ FT260_SET_UART_XON_XOFF = 0x49,
+};
+
+/* Response codes in I2C status report */
+enum {
+ FT260_I2C_STATUS_SUCCESS = 0x00,
+ FT260_I2C_STATUS_CTRL_BUSY = 0x01,
+ FT260_I2C_STATUS_ERROR = 0x02,
+ FT260_I2C_STATUS_ADDR_NO_ACK = 0x04,
+ FT260_I2C_STATUS_DATA_NO_ACK = 0x08,
+ FT260_I2C_STATUS_ARBITR_LOST = 0x10,
+ FT260_I2C_STATUS_CTRL_IDLE = 0x20,
+ FT260_I2C_STATUS_BUS_BUSY = 0x40,
+};
+
+/* I2C Conditions flags */
+enum {
+ FT260_FLAG_NONE = 0x00,
+ FT260_FLAG_START = 0x02,
+ FT260_FLAG_START_REPEATED = 0x03,
+ FT260_FLAG_STOP = 0x04,
+ FT260_FLAG_START_STOP = 0x06,
+ FT260_FLAG_START_STOP_REPEATED = 0x07,
+};
+
+#define FT260_SET_REQUEST_VALUE(report_id) ((FT260_FEATURE << 8) | report_id)
+
+/* Feature In reports */
+
+struct ft260_get_chip_version_report {
+ u8 report; /* FT260_CHIP_VERSION */
+ u8 chip_code[4]; /* FTDI chip identification code */
+ u8 reserved[8];
+} __packed;
+
+struct ft260_get_system_status_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 chip_mode; /* DCNF0 and DCNF1 status, bits 0-1 */
+ u8 clock_ctl; /* 0 - 12MHz, 1 - 24MHz, 2 - 48MHz */
+ u8 suspend_status; /* 0 - not suspended, 1 - suspended */
+ u8 pwren_status; /* 0 - FT260 is not ready, 1 - ready */
+ u8 i2c_enable; /* 0 - disabled, 1 - enabled */
+ u8 uart_mode; /* 0 - OFF; 1 - RTS_CTS, 2 - DTR_DSR, */
+ /* 3 - XON_XOFF, 4 - No flow control */
+ u8 hid_over_i2c_en; /* 0 - disabled, 1 - enabled */
+ u8 gpio2_function; /* 0 - GPIO, 1 - SUSPOUT, */
+ /* 2 - PWREN, 4 - TX_LED */
+ u8 gpioA_function; /* 0 - GPIO, 3 - TX_ACTIVE, 4 - TX_LED */
+ u8 gpioG_function; /* 0 - GPIO, 2 - PWREN, */
+ /* 5 - RX_LED, 6 - BCD_DET */
+ u8 suspend_out_pol; /* 0 - active-high, 1 - active-low */
+ u8 enable_wakeup_int; /* 0 - disabled, 1 - enabled */
+ u8 intr_cond; /* Interrupt trigger conditions */
+ u8 power_saving_en; /* 0 - disabled, 1 - enabled */
+ u8 reserved[10];
+} __packed;
+
+struct ft260_get_i2c_status_report {
+ u8 report; /* FT260_I2C_STATUS */
+ u8 bus_status; /* I2C bus status */
+ __le16 clock; /* I2C bus clock in range 60-3400 KHz */
+ u8 reserved;
+} __packed;
+
+/* Feature Out reports */
+
+struct ft260_set_system_clock_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_CLOCK */
+ u8 clock_ctl; /* 0 - 12MHz, 1 - 24MHz, 2 - 48MHz */
+} __packed;
+
+struct ft260_set_i2c_mode_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_I2C_MODE */
+ u8 i2c_enable; /* 0 - disabled, 1 - enabled */
+} __packed;
+
+struct ft260_set_uart_mode_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_UART_MODE */
+ u8 uart_mode; /* 0 - OFF; 1 - RTS_CTS, 2 - DTR_DSR, */
+ /* 3 - XON_XOFF, 4 - No flow control */
+} __packed;
+
+struct ft260_set_i2c_reset_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_I2C_RESET */
+} __packed;
+
+struct ft260_set_i2c_speed_report {
+ u8 report; /* FT260_SYSTEM_SETTINGS */
+ u8 request; /* FT260_SET_I2C_CLOCK_SPEED */
+ __le16 clock; /* I2C bus clock in range 60-3400 KHz */
+} __packed;
+
+/* Data transfer reports */
+
+struct ft260_i2c_write_request_report {
+ u8 report; /* FT260_I2C_REPORT */
+ u8 address; /* 7-bit I2C address */
+ u8 flag; /* I2C transaction condition */
+ u8 length; /* data payload length */
+ u8 data[60]; /* data payload */
+} __packed;
+
+struct ft260_i2c_read_request_report {
+ u8 report; /* FT260_I2C_READ_REQ */
+ u8 address; /* 7-bit I2C address */
+ u8 flag; /* I2C transaction condition */
+ __le16 length; /* data payload length */
+} __packed;
+
+struct ft260_i2c_input_report {
+ u8 report; /* FT260_I2C_REPORT */
+ u8 length; /* data payload length */
+ u8 data[2]; /* data payload */
+} __packed;
+
+static const struct hid_device_id ft260_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY,
+ USB_DEVICE_ID_FT260) },
+ { /* END OF LIST */ }
+};
+MODULE_DEVICE_TABLE(hid, ft260_devices);
+
+struct ft260_device {
+ struct i2c_adapter adap;
+ struct hid_device *hdev;
+ struct completion wait;
+ struct mutex lock;
+ u8 write_buf[FT260_REPORT_MAX_LENGTH];
+ u8 *read_buf;
+ u16 read_idx;
+ u16 read_len;
+ u16 clock;
+};
+
+static int ft260_hid_feature_report_get(struct hid_device *hdev,
+ unsigned char report_id, u8 *data,
+ size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ memcpy(data, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ft260_hid_feature_report_set(struct hid_device *hdev, u8 *data,
+ size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = FT260_SYSTEM_SETTINGS;
+
+ ret = hid_hw_raw_request(hdev, buf[0], buf, len, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+
+ kfree(buf);
+ return ret;
+}
+
+static int ft260_i2c_reset(struct hid_device *hdev)
+{
+ struct ft260_set_i2c_reset_report report;
+ int ret;
+
+ report.request = FT260_SET_I2C_RESET;
+
+ ret = ft260_hid_feature_report_set(hdev, (u8 *)&report, sizeof(report));
+ if (ret < 0) {
+ hid_err(hdev, "failed to reset I2C controller: %d\n", ret);
+ return ret;
+ }
+
+ ft260_dbg("done\n");
+ return ret;
+}
+
+static int ft260_xfer_status(struct ft260_device *dev)
+{
+ struct hid_device *hdev = dev->hdev;
+ struct ft260_get_i2c_status_report report;
+ int ret;
+
+ ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
+ (u8 *)&report, sizeof(report));
+ if (ret < 0) {
+ hid_err(hdev, "failed to retrieve status: %d\n", ret);
+ return ret;
+ }
+
+ dev->clock = le16_to_cpu(report.clock);
+ ft260_dbg("bus_status %#02x, clock %u\n", report.bus_status,
+ dev->clock);
+
+ if (report.bus_status & FT260_I2C_STATUS_CTRL_BUSY)
+ return -EAGAIN;
+
+ if (report.bus_status & FT260_I2C_STATUS_BUS_BUSY)
+ return -EBUSY;
+
+ if (report.bus_status & FT260_I2C_STATUS_ERROR)
+ return -EIO;
+
+ ret = -EIO;
+
+ if (report.bus_status & FT260_I2C_STATUS_ADDR_NO_ACK)
+ ft260_dbg("unacknowledged address\n");
+
+ if (report.bus_status & FT260_I2C_STATUS_DATA_NO_ACK)
+ ft260_dbg("unacknowledged data\n");
+
+ if (report.bus_status & FT260_I2C_STATUS_ARBITR_LOST)
+ ft260_dbg("arbitration loss\n");
+
+ if (report.bus_status & FT260_I2C_STATUS_CTRL_IDLE)
+ ret = 0;
+
+ return ret;
+}
+
+static int ft260_hid_output_report(struct hid_device *hdev, u8 *data,
+ size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_output_report(hdev, buf, len);
+
+ kfree(buf);
+ return ret;
+}
+
+static int ft260_hid_output_report_check_status(struct ft260_device *dev,
+ u8 *data, int len)
+{
+ int ret, usec, try = 3;
+ struct hid_device *hdev = dev->hdev;
+
+ ret = ft260_hid_output_report(hdev, data, len);
+ if (ret < 0) {
+ hid_err(hdev, "%s: failed to start transfer, ret %d\n",
+ __func__, ret);
+ ft260_i2c_reset(hdev);
+ return ret;
+ }
+
+ /* transfer time = 1 / clock(KHz) * 10 bits * bytes */
+ usec = 10000 / dev->clock * len;
+ usleep_range(usec, usec + 100);
+ ft260_dbg("wait %d usec, len %d\n", usec, len);
+ do {
+ ret = ft260_xfer_status(dev);
+ if (ret != -EAGAIN)
+ break;
+ } while (--try);
+
+ if (ret == 0 || ret == -EBUSY)
+ return 0;
+
+ ft260_i2c_reset(hdev);
+ return -EIO;
+}
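
To make the timing estimate concrete: one byte costs roughly 10 bit-times (8 data bits plus ACK and overhead), and with the bus clock in kHz that is 10000 / clock microseconds per byte; a sketch of the same integer arithmetic:

/* e.g. 100 kHz clock, 64-byte report: 10000 / 100 * 64 = 6400 us */
static int xfer_time_usec(unsigned int clock_khz, int len)
{
	return 10000 / clock_khz * len;
}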
+
+static int ft260_i2c_write(struct ft260_device *dev, u8 addr, u8 *data,
+ int data_len, u8 flag)
+{
+ int len, ret, idx = 0;
+ struct hid_device *hdev = dev->hdev;
+ struct ft260_i2c_write_request_report *rep =
+ (struct ft260_i2c_write_request_report *)dev->write_buf;
+
+ do {
+ if (data_len <= FT260_WR_DATA_MAX)
+ len = data_len;
+ else
+ len = FT260_WR_DATA_MAX;
+
+ rep->report = FT260_I2C_DATA_REPORT_ID(len);
+ rep->address = addr;
+ rep->length = len;
+ rep->flag = flag;
+
+ memcpy(rep->data, &data[idx], len);
+
+ ft260_dbg("rep %#02x addr %#02x off %d len %d d[0] %#02x\n",
+ rep->report, addr, idx, len, data[0]);
+
+ ret = ft260_hid_output_report_check_status(dev, (u8 *)rep,
+ len + 4);
+ if (ret < 0) {
+ hid_err(hdev, "%s: failed to start transfer, ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ data_len -= len;
+ idx += len;
+
+ } while (data_len > 0);
+
+ return 0;
+}
+
+static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
+ u8 *data, u8 data_len, u8 flag)
+{
+ int ret = 0;
+ int len = 4;
+
+ struct ft260_i2c_write_request_report *rep =
+ (struct ft260_i2c_write_request_report *)dev->write_buf;
+
+ rep->address = addr;
+ rep->data[0] = cmd;
+ rep->length = data_len + 1;
+ rep->flag = flag;
+ len += rep->length;
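+ /* e.g. a word-data write: cmd + 2 data bytes, so rep->length = 3 and len = 7 */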
+
+ rep->report = FT260_I2C_DATA_REPORT_ID(len);
+
+ if (data_len > 0)
+ memcpy(&rep->data[1], data, data_len);
+
+ ft260_dbg("rep %#02x addr %#02x cmd %#02x datlen %d replen %d\n",
+ rep->report, addr, cmd, rep->length, len);
+
+ ret = ft260_hid_output_report_check_status(dev, (u8 *)rep, len);
+
+ return ret;
+}
+
+static int ft260_i2c_read(struct ft260_device *dev, u8 addr, u8 *data,
+ u16 len, u8 flag)
+{
+ struct ft260_i2c_read_request_report rep;
+ struct hid_device *hdev = dev->hdev;
+ int timeout;
+ int ret;
+
+ if (len > FT260_RD_DATA_MAX) {
+ hid_err(hdev, "%s: unsupported rd len: %d\n", __func__, len);
+ return -EINVAL;
+ }
+
+ dev->read_idx = 0;
+ dev->read_buf = data;
+ dev->read_len = len;
+
+ rep.report = FT260_I2C_READ_REQ;
+ rep.length = cpu_to_le16(len);
+ rep.address = addr;
+ rep.flag = flag;
+
+ ft260_dbg("rep %#02x addr %#02x len %d\n", rep.report, rep.address,
+ rep.length);
+
+ reinit_completion(&dev->wait);
+
+ ret = ft260_hid_output_report(hdev, (u8 *)&rep, sizeof(rep));
+ if (ret < 0) {
+ hid_err(hdev, "%s: failed to start transaction, ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(5000);
+ if (!wait_for_completion_timeout(&dev->wait, timeout)) {
+ ft260_i2c_reset(hdev);
+ return -ETIMEDOUT;
+ }
+
+ ret = ft260_xfer_status(dev);
+ if (ret == 0)
+ return 0;
+
+ ft260_i2c_reset(hdev);
+ return -EIO;
+}
+
+/*
+ * A random read operation is implemented as a dummy write operation, followed
+ * by a current address read operation. The dummy write operation is used to
+ * load the target byte address into the current byte address counter, from
+ * which the subsequent current address read operation then reads.
+ */
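+/*
+ * For example, a 4-byte read from byte address 0x10 of an EEPROM-like chip
+ * arrives here as msgs[0] = {addr, len = 1, buf = {0x10}} followed by
+ * msgs[1] = {addr, I2C_M_RD, len = 4}; the adapter quirks below cap the
+ * first message at 2 bytes.
+ */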
+static int ft260_i2c_write_read(struct ft260_device *dev, struct i2c_msg *msgs)
+{
+ int len, ret;
+ u16 left_len = msgs[1].len;
+ u8 *read_buf = msgs[1].buf;
+ u8 addr = msgs[0].addr;
+ u16 read_off = 0;
+ struct hid_device *hdev = dev->hdev;
+
+ if (msgs[0].len > 2) {
+ hid_err(hdev, "%s: unsupported wr len: %d\n", __func__,
+ msgs[0].len);
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(&read_off, msgs[0].buf, msgs[0].len);
+
+ do {
+ if (left_len <= FT260_RD_DATA_MAX)
+ len = left_len;
+ else
+ len = FT260_RD_DATA_MAX;
+
+ ft260_dbg("read_off %#x left_len %d len %d\n", read_off,
+ left_len, len);
+
+ ret = ft260_i2c_write(dev, addr, (u8 *)&read_off, msgs[0].len,
+ FT260_FLAG_START);
+ if (ret < 0)
+ return ret;
+
+ ret = ft260_i2c_read(dev, addr, read_buf, len,
+ FT260_FLAG_START_STOP);
+ if (ret < 0)
+ return ret;
+
+ left_len -= len;
+ read_buf += len;
+ read_off += len;
+
+ } while (left_len > 0);
+
+ return 0;
+}
+
+static int ft260_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+ struct ft260_device *dev = i2c_get_adapdata(adapter);
+ struct hid_device *hdev = dev->hdev;
+
+ mutex_lock(&dev->lock);
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "failed to enter FULLON power mode: %d\n", ret);
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ if (num == 1) {
+ if (msgs->flags & I2C_M_RD)
+ ret = ft260_i2c_read(dev, msgs->addr, msgs->buf,
+ msgs->len, FT260_FLAG_START_STOP);
+ else
+ ret = ft260_i2c_write(dev, msgs->addr, msgs->buf,
+ msgs->len, FT260_FLAG_START_STOP);
+ if (ret < 0)
+ goto i2c_exit;
+
+ } else {
+ /* Combined write then read message */
+ ret = ft260_i2c_write_read(dev, msgs);
+ if (ret < 0)
+ goto i2c_exit;
+ }
+
+ ret = num;
+i2c_exit:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static int ft260_smbus_xfer(struct i2c_adapter *adapter, u16 addr, u16 flags,
+ char read_write, u8 cmd, int size,
+ union i2c_smbus_data *data)
+{
+ int ret;
+ struct ft260_device *dev = i2c_get_adapdata(adapter);
+ struct hid_device *hdev = dev->hdev;
+
+ ft260_dbg("smbus size %d\n", size);
+
+ mutex_lock(&dev->lock);
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "power management error: %d\n", ret);
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ if (read_write == I2C_SMBUS_READ)
+ ret = ft260_i2c_read(dev, addr, &data->byte, 0,
+ FT260_FLAG_START_STOP);
+ else
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START_STOP);
+ break;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_READ)
+ ret = ft260_i2c_read(dev, addr, &data->byte, 1,
+ FT260_FLAG_START_STOP);
+ else
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START_STOP);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, &data->byte, 1,
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd, &data->byte, 1,
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, (u8 *)&data->word, 2,
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd,
+ (u8 *)&data->word, 2,
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, data->block,
+ data->block[0] + 1,
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd, data->block,
+ data->block[0] + 1,
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = ft260_smbus_write(dev, addr, cmd, NULL, 0,
+ FT260_FLAG_START);
+ if (ret)
+ goto smbus_exit;
+
+ ret = ft260_i2c_read(dev, addr, data->block + 1,
+ data->block[0],
+ FT260_FLAG_START_STOP_REPEATED);
+ } else {
+ ret = ft260_smbus_write(dev, addr, cmd, data->block + 1,
+ data->block[0],
+ FT260_FLAG_START_STOP);
+ }
+ break;
+ default:
+ hid_err(hdev, "unsupported smbus transaction size %d\n", size);
+ ret = -EOPNOTSUPP;
+ }
+
+smbus_exit:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static u32 ft260_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_QUICK |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK;
+}
+
+static const struct i2c_adapter_quirks ft260_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_comb_1st_msg_len = 2,
+};
+
+static const struct i2c_algorithm ft260_i2c_algo = {
+ .master_xfer = ft260_i2c_xfer,
+ .smbus_xfer = ft260_smbus_xfer,
+ .functionality = ft260_functionality,
+};
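+
+/*
+ * With the adapter registered, the bus is reachable through the standard
+ * i2c-dev interface; e.g. (bus number illustrative):
+ *   i2cdetect -y 5
+ *   i2cget -y 5 0x50 0x10
+ */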
+
+static int ft260_get_system_config(struct hid_device *hdev,
+ struct ft260_get_system_status_report *cfg)
+{
+ int ret;
+ int len = sizeof(struct ft260_get_system_status_report);
+
+ ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
+ (u8 *)cfg, len);
+ if (ret != len) {
+ hid_err(hdev, "failed to retrieve system status\n");
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+static int ft260_is_interface_enabled(struct hid_device *hdev)
+{
+ struct ft260_get_system_status_report cfg;
+ struct usb_interface *usbif = to_usb_interface(hdev->dev.parent);
+ int interface = usbif->cur_altsetting->desc.bInterfaceNumber;
+ int ret;
+
+ ret = ft260_get_system_config(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ ft260_dbg("interface: 0x%02x\n", interface);
+ ft260_dbg("chip mode: 0x%02x\n", cfg.chip_mode);
+ ft260_dbg("clock_ctl: 0x%02x\n", cfg.clock_ctl);
+ ft260_dbg("i2c_enable: 0x%02x\n", cfg.i2c_enable);
+ ft260_dbg("uart_mode: 0x%02x\n", cfg.uart_mode);
+
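+ /*
+ * Per the handling below: in the dual-function chip modes, USB
+ * interface 0 carries I2C while interface 1 carries UART, which this
+ * driver does not support.
+ */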
+ switch (cfg.chip_mode) {
+ case FT260_MODE_ALL:
+ case FT260_MODE_BOTH:
+ if (interface == 1) {
+ hid_info(hdev, "uart interface is not supported\n");
+ return 0;
+ }
+ ret = 1;
+ break;
+ case FT260_MODE_UART:
+ if (interface == 0) {
+ hid_info(hdev, "uart is unsupported on interface 0\n");
+ ret = 0;
+ }
+ break;
+ case FT260_MODE_I2C:
+ if (interface == 1) {
+ hid_info(hdev, "i2c is unsupported on interface 1\n");
+ return 0;
+ }
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
+static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
+ u8 *field, u8 *buf)
+{
+ int ret;
+
+ ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
+ if (ret < 0)
+ return ret;
+ if (ret != len)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
+}
+
+static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
+ u16 *field, u8 *buf)
+{
+ int ret;
+
+ ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
+ if (ret < 0)
+ return ret;
+ if (ret != len)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
+}
+
+#define FT260_ATTR_SHOW(name, reptype, id, type, func) \
+ static ssize_t name##_show(struct device *kdev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct reptype rep; \
+ struct hid_device *hdev = to_hid_device(kdev); \
+ type *field = &rep.name; \
+ int len = sizeof(rep); \
+ \
+ return func(hdev, id, (u8 *)&rep, len, field, buf); \
+ }
+
+#define FT260_SSTAT_ATTR_SHOW(name) \
+ FT260_ATTR_SHOW(name, ft260_get_system_status_report, \
+ FT260_SYSTEM_SETTINGS, u8, ft260_byte_show)
+
+#define FT260_I2CST_ATTR_SHOW(name) \
+ FT260_ATTR_SHOW(name, ft260_get_i2c_status_report, \
+ FT260_I2C_STATUS, u16, ft260_word_show)
+
+#define FT260_ATTR_STORE(name, reptype, id, req, type, func) \
+ static ssize_t name##_store(struct device *kdev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct reptype rep; \
+ struct hid_device *hdev = to_hid_device(kdev); \
+ type name; \
+ int ret; \
+ \
+ if (!func(buf, 10, &name)) { \
+ rep.name = name; \
+ rep.report = id; \
+ rep.request = req; \
+ ret = ft260_hid_feature_report_set(hdev, (u8 *)&rep, \
+ sizeof(rep)); \
+ if (!ret) \
+ ret = count; \
+ } else { \
+ ret = -EINVAL; \
+ } \
+ return ret; \
+ }
+
+#define FT260_BYTE_ATTR_STORE(name, reptype, req) \
+ FT260_ATTR_STORE(name, reptype, FT260_SYSTEM_SETTINGS, req, \
+ u8, kstrtou8)
+
+#define FT260_WORD_ATTR_STORE(name, reptype, req) \
+ FT260_ATTR_STORE(name, reptype, FT260_SYSTEM_SETTINGS, req, \
+ u16, kstrtou16)
+
+FT260_SSTAT_ATTR_SHOW(chip_mode);
+static DEVICE_ATTR_RO(chip_mode);
+
+FT260_SSTAT_ATTR_SHOW(pwren_status);
+static DEVICE_ATTR_RO(pwren_status);
+
+FT260_SSTAT_ATTR_SHOW(suspend_status);
+static DEVICE_ATTR_RO(suspend_status);
+
+FT260_SSTAT_ATTR_SHOW(hid_over_i2c_en);
+static DEVICE_ATTR_RO(hid_over_i2c_en);
+
+FT260_SSTAT_ATTR_SHOW(power_saving_en);
+static DEVICE_ATTR_RO(power_saving_en);
+
+FT260_SSTAT_ATTR_SHOW(i2c_enable);
+FT260_BYTE_ATTR_STORE(i2c_enable, ft260_set_i2c_mode_report,
+ FT260_SET_I2C_MODE);
+static DEVICE_ATTR_RW(i2c_enable);
+
+FT260_SSTAT_ATTR_SHOW(uart_mode);
+FT260_BYTE_ATTR_STORE(uart_mode, ft260_set_uart_mode_report,
+ FT260_SET_UART_MODE);
+static DEVICE_ATTR_RW(uart_mode);
+
+FT260_SSTAT_ATTR_SHOW(clock_ctl);
+FT260_BYTE_ATTR_STORE(clock_ctl, ft260_set_system_clock_report,
+ FT260_SET_CLOCK);
+static DEVICE_ATTR_RW(clock_ctl);
+
+FT260_I2CST_ATTR_SHOW(clock);
+FT260_WORD_ATTR_STORE(clock, ft260_set_i2c_speed_report,
+ FT260_SET_I2C_CLOCK_SPEED);
+static DEVICE_ATTR_RW(clock);
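+
+/*
+ * Example (sysfs path illustrative): the generated clock_store handler
+ * above lets the I2C bus speed be set in kHz from userspace, e.g.
+ *   echo 400 > /sys/bus/hid/devices/<dev>/clock
+ */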
+
+static ssize_t i2c_reset_store(struct device *kdev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = to_hid_device(kdev);
+ int ret = ft260_i2c_reset(hdev);
+
+ if (ret)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR_WO(i2c_reset);
+
+static const struct attribute_group ft260_attr_group = {
+ .attrs = (struct attribute *[]) {
+ &dev_attr_chip_mode.attr,
+ &dev_attr_pwren_status.attr,
+ &dev_attr_suspend_status.attr,
+ &dev_attr_hid_over_i2c_en.attr,
+ &dev_attr_power_saving_en.attr,
+ &dev_attr_i2c_enable.attr,
+ &dev_attr_uart_mode.attr,
+ &dev_attr_clock_ctl.attr,
+ &dev_attr_i2c_reset.attr,
+ &dev_attr_clock.attr,
+ NULL
+ }
+};
+
+static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct ft260_device *dev;
+ struct ft260_get_chip_version_report version;
+ int ret;
+
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "failed to parse HID\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "failed to start HID HW\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "failed to open HID HW\n");
+ goto err_hid_stop;
+ }
+
+ ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
+ (u8 *)&version, sizeof(version));
+ if (ret != sizeof(version)) {
+ hid_err(hdev, "failed to retrieve chip version\n");
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_hid_close;
+ }
+
+ hid_info(hdev, "chip code: %02x%02x %02x%02x\n",
+ version.chip_code[0], version.chip_code[1],
+ version.chip_code[2], version.chip_code[3]);
+
+ ret = ft260_is_interface_enabled(hdev);
+ if (ret <= 0)
+ goto err_hid_close;
+
+ hid_set_drvdata(hdev, dev);
+ dev->hdev = hdev;
+ dev->adap.owner = THIS_MODULE;
+ dev->adap.class = I2C_CLASS_HWMON;
+ dev->adap.algo = &ft260_i2c_algo;
+ dev->adap.quirks = &ft260_i2c_quirks;
+ dev->adap.dev.parent = &hdev->dev;
+ snprintf(dev->adap.name, sizeof(dev->adap.name),
+ "FT260 usb-i2c bridge on hidraw%d",
+ ((struct hidraw *)hdev->hidraw)->minor);
+
+ mutex_init(&dev->lock);
+ init_completion(&dev->wait);
+
+ ret = i2c_add_adapter(&dev->adap);
+ if (ret) {
+ hid_err(hdev, "failed to add i2c adapter\n");
+ goto err_hid_close;
+ }
+
+ i2c_set_adapdata(&dev->adap, dev);
+
+ ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group);
+ if (ret < 0) {
+ hid_err(hdev, "failed to create sysfs attrs\n");
+ goto err_i2c_free;
+ }
+
+ ret = ft260_xfer_status(dev);
+ if (ret)
+ ft260_i2c_reset(hdev);
+
+ return 0;
+
+err_i2c_free:
+ i2c_del_adapter(&dev->adap);
+err_hid_close:
+ hid_hw_close(hdev);
+err_hid_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void ft260_remove(struct hid_device *hdev)
+{
+ struct ft260_device *dev = hid_get_drvdata(hdev);
+
+ /*
+ * drvdata is set only after the interface check succeeds in probe, so
+ * a NULL here means there is nothing to tear down. This also avoids
+ * re-reading the config over USB, which fails on disconnect.
+ */
+ if (!dev)
+ return;
+
+ sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group);
+ i2c_del_adapter(&dev->adap);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static int ft260_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct ft260_device *dev = hid_get_drvdata(hdev);
+ struct ft260_i2c_input_report *xfer = (void *)data;
+
+ if (xfer->report >= FT260_I2C_REPORT_MIN &&
+ xfer->report <= FT260_I2C_REPORT_MAX) {
+ ft260_dbg("i2c resp: rep %#02x len %d\n", xfer->report,
+ xfer->length);
+
+ memcpy(&dev->read_buf[dev->read_idx], &xfer->data,
+ xfer->length);
+ dev->read_idx += xfer->length;
+
+ if (dev->read_idx == dev->read_len)
+ complete(&dev->wait);
+
+ } else {
+ hid_err(hdev, "unknown report: %#02x\n", xfer->report);
+ return 0;
+ }
+ return 1;
+}
+
+static struct hid_driver ft260_driver = {
+ .name = "ft260",
+ .id_table = ft260_devices,
+ .probe = ft260_probe,
+ .remove = ft260_remove,
+ .raw_event = ft260_raw_event,
+};
+
+module_hid_driver(ft260_driver);
+MODULE_DESCRIPTION("FTDI FT260 USB HID to I2C host bridge");
+MODULE_AUTHOR("Michael Zaidman <michael.zaidman@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 67fd8a2f5aba..84b8da3e7d09 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -93,6 +93,7 @@
#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
@@ -431,6 +432,7 @@
#define USB_VENDOR_ID_FUTURE_TECHNOLOGY 0x0403
#define USB_DEVICE_ID_RETRODE2 0x97c1
+#define USB_DEVICE_ID_FT260 0x6030
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
@@ -808,6 +810,7 @@
#define USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER 0xc51b
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
+#define USB_DEVICE_ID_LOGITECH_G700_RECEIVER 0xc531
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
@@ -816,8 +819,14 @@
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
-#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
-#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
+#define USB_DEVICE_ID_MX5000_RECEIVER_MOUSE_DEV 0xc70a
+#define USB_DEVICE_ID_MX5000_RECEIVER_KBD_DEV 0xc70e
+#define USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_KBD_DEV 0xc713
+#define USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_MOUSE_DEV 0xc714
+#define USB_DEVICE_ID_MX5500_RECEIVER_KBD_DEV 0xc71b
+#define USB_DEVICE_ID_MX5500_RECEIVER_MOUSE_DEV 0xc71c
+#define USB_DEVICE_ID_DINOVO_MINI_RECEIVER_KBD_DEV 0xc71e
+#define USB_DEVICE_ID_DINOVO_MINI_RECEIVER_MOUSE_DEV 0xc71f
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2 0xca03
#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
@@ -946,6 +955,7 @@
#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 236bccd37760..18f5e28d475c 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -435,7 +435,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
return ret;
}
-static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ struct hid_field *field, bool is_percentage)
{
struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg = { .drv_data = dev, };
@@ -475,7 +476,7 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
min = field->logical_minimum;
max = field->logical_maximum;
- if (quirks & HID_BATTERY_QUIRK_PERCENT) {
+ if (is_percentage || (quirks & HID_BATTERY_QUIRK_PERCENT)) {
min = 0;
max = 100;
}
@@ -552,7 +553,7 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
- struct hid_field *field)
+ struct hid_field *field, bool is_percentage)
{
return 0;
}
@@ -806,7 +807,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
case 0x3b: /* Battery Strength */
- hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field, false);
usage->type = EV_PWR;
return;
@@ -1068,7 +1069,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_UP_GENDEVCTRLS:
switch (usage->hid) {
case HID_DC_BATTERYSTRENGTH:
- hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field, false);
+ usage->type = EV_PWR;
+ return;
+ }
+ goto unknown;
+
+ case HID_UP_BATTERY:
+ switch (usage->hid) {
+ case HID_BAT_ABSOLUTESTATEOFCHARGE:
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field, true);
usage->type = EV_PWR;
return;
}
@@ -1672,7 +1682,7 @@ static void report_features(struct hid_device *hid)
/* Verify if Battery Strength feature is available */
if (usage->hid == HID_DC_BATTERYSTRENGTH)
hidinput_setup_battery(hid, HID_FEATURE_REPORT,
- rep->field[i]);
+ rep->field[i], false);
if (drv->feature_mapping)
drv->feature_mapping(hid, rep->field[i], usage);
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index c8b40c07eca6..f46616390a98 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -655,7 +655,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
/**
- * Enable fully-functional tablet mode by setting a special feature report.
+ * kye_tablet_enable() - Enable fully-functional tablet mode by setting a special feature report.
*
* @hdev: HID device
*
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index c6c8e20f3e8d..93b1f935e526 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -33,6 +33,9 @@
#include "hid-ids.h"
+/* Userspace expects F20 for mic-mute; KEY_MICMUTE itself does not work */
+#define LENOVO_KEY_MICMUTE KEY_F20
+
struct lenovo_drvdata {
u8 led_report[3]; /* Must be first for proper alignment */
int led_state;
@@ -62,8 +65,8 @@ struct lenovo_drvdata {
#define TP10UBKBD_LED_OFF 1
#define TP10UBKBD_LED_ON 2
-static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
- enum led_brightness value)
+static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+ enum led_brightness value)
{
struct lenovo_drvdata *data = hid_get_drvdata(hdev);
int ret;
@@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
- if (ret)
- hid_err(hdev, "Set LED output report error: %d\n", ret);
+ if (ret != 3) {
+ if (ret != -ENODEV)
+ hid_err(hdev, "Set LED output report error: %d\n", ret);
+
+ ret = ret < 0 ? ret : -EIO;
+ } else {
+ ret = 0;
+ }
mutex_unlock(&data->led_report_mutex);
+
+ return ret;
}
static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
@@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
/* This sub-device contains trackpoint, mark it */
hid_set_drvdata(hdev, (void *)1);
- map_key_clear(KEY_MICMUTE);
+ map_key_clear(LENOVO_KEY_MICMUTE);
return 1;
}
return 0;
@@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
(usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
switch (usage->hid & HID_USAGE) {
case 0x00f1: /* Fn-F4: Mic mute */
- map_key_clear(KEY_MICMUTE);
+ map_key_clear(LENOVO_KEY_MICMUTE);
return 1;
case 0x00f2: /* Fn-F5: Brightness down */
map_key_clear(KEY_BRIGHTNESSDOWN);
@@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
map_key_clear(KEY_FN_ESC);
return 1;
case 9: /* Fn-F4: Mic mute */
- map_key_clear(KEY_MICMUTE);
+ map_key_clear(LENOVO_KEY_MICMUTE);
return 1;
case 10: /* Fn-F7: Control panel */
map_key_clear(KEY_CONFIG);
@@ -255,6 +266,54 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
return 0;
}
+static int lenovo_input_mapping_x1_tab_kbd(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ /*
+ * The ThinkPad X1 Tablet Thin Keyboard uses 0x000c0001 usage for
+ * a bunch of keys which have no standard consumer page code.
+ */
+ if (usage->hid == 0x000c0001) {
+ switch (usage->usage_index) {
+ case 0: /* Fn-F10: Enable/disable bluetooth */
+ map_key_clear(KEY_BLUETOOTH);
+ return 1;
+ case 1: /* Fn-F11: Keyboard settings */
+ map_key_clear(KEY_KEYBOARD);
+ return 1;
+ case 2: /* Fn-F12: User function / Cortana */
+ map_key_clear(KEY_MACRO1);
+ return 1;
+ case 3: /* Fn-PrtSc: Snipping tool */
+ map_key_clear(KEY_SELECTIVE_SCREENSHOT);
+ return 1;
+ case 8: /* Fn-Esc: Fn-lock toggle */
+ map_key_clear(KEY_FN_ESC);
+ return 1;
+ case 9: /* Fn-F4: Mute/unmute microphone */
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ case 10: /* Fn-F9: Settings */
+ map_key_clear(KEY_CONFIG);
+ return 1;
+ case 13: /* Fn-F7: Manage external displays */
+ map_key_clear(KEY_SWITCHVIDEOMODE);
+ return 1;
+ case 14: /* Fn-F8: Enable/disable wifi */
+ map_key_clear(KEY_WLAN);
+ return 1;
+ }
+ }
+
+ if (usage->hid == (HID_UP_KEYBOARD | 0x009a)) {
+ map_key_clear(KEY_SYSRQ);
+ return 1;
+ }
+
+ return 0;
+}
+
static int lenovo_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
@@ -278,6 +337,8 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
+ return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
default:
return 0;
}
@@ -349,7 +410,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
{
struct hid_device *hdev = to_hid_device(dev);
struct lenovo_drvdata *data = hid_get_drvdata(hdev);
- int value;
+ int value, ret;
if (kstrtoint(buf, 10, &value))
return -EINVAL;
@@ -364,7 +425,10 @@ static ssize_t attr_fn_lock_store(struct device *dev,
lenovo_features_set_cptkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
- lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
+ ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+ if (ret)
+ return ret;
break;
}
@@ -498,11 +562,15 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
+ if (!hid_get_drvdata(hdev))
+ return 0;
+
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
case USB_DEVICE_ID_LENOVO_CBTKBD:
return lenovo_event_cptkbd(hdev, field, usage, value);
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
return lenovo_event_tp10ubkbd(hdev, field, usage, value);
default:
return 0;
@@ -761,23 +829,7 @@ static void lenovo_led_set_tpkbd(struct hid_device *hdev)
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}
-static enum led_brightness lenovo_led_brightness_get(
- struct led_classdev *led_cdev)
-{
- struct device *dev = led_cdev->dev->parent;
- struct hid_device *hdev = to_hid_device(dev);
- struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
- int led_nr = 0;
-
- if (led_cdev == &data_pointer->led_micmute)
- led_nr = 1;
-
- return data_pointer->led_state & (1 << led_nr)
- ? LED_FULL
- : LED_OFF;
-}
-
-static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
+static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct device *dev = led_cdev->dev->parent;
@@ -785,6 +837,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
int led_nr = 0;
+ int ret = 0;
if (led_cdev == &data_pointer->led_micmute)
led_nr = 1;
@@ -799,9 +852,12 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
lenovo_led_set_tpkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
- lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
+ ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
break;
}
+
+ return ret;
}
static int lenovo_register_leds(struct hid_device *hdev)
@@ -821,16 +877,20 @@ static int lenovo_register_leds(struct hid_device *hdev)
snprintf(name_micm, name_sz, "%s:amber:micmute", dev_name(&hdev->dev));
data->led_mute.name = name_mute;
- data->led_mute.brightness_get = lenovo_led_brightness_get;
- data->led_mute.brightness_set = lenovo_led_brightness_set;
+ data->led_mute.default_trigger = "audio-mute";
+ data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
+ data->led_mute.max_brightness = 1;
+ data->led_mute.flags = LED_HW_PLUGGABLE;
data->led_mute.dev = &hdev->dev;
ret = led_classdev_register(&hdev->dev, &data->led_mute);
if (ret < 0)
return ret;
data->led_micmute.name = name_micm;
- data->led_micmute.brightness_get = lenovo_led_brightness_get;
- data->led_micmute.brightness_set = lenovo_led_brightness_set;
+ data->led_micmute.default_trigger = "audio-micmute";
+ data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
+ data->led_micmute.max_brightness = 1;
+ data->led_micmute.flags = LED_HW_PLUGGABLE;
data->led_micmute.dev = &hdev->dev;
ret = led_classdev_register(&hdev->dev, &data->led_micmute);
if (ret < 0) {
@@ -952,11 +1012,24 @@ static const struct attribute_group lenovo_attr_group_tp10ubkbd = {
static int lenovo_probe_tp10ubkbd(struct hid_device *hdev)
{
+ struct hid_report_enum *rep_enum;
struct lenovo_drvdata *data;
+ struct hid_report *rep;
+ bool found;
int ret;
- /* All the custom action happens on the USBMOUSE device for USB */
- if (hdev->type != HID_TYPE_USBMOUSE)
+ /*
+ * The LEDs and the Fn-lock functionality use output report 9 with an
+ * application of 0xffa00001, so register the LEDs on the interface
+ * that carries this output report.
+ */
+ found = false;
+ rep_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xffa00001)
+ found = true;
+ }
+ if (!found)
return 0;
data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL);
@@ -1018,6 +1091,7 @@ static int lenovo_probe(struct hid_device *hdev,
ret = lenovo_probe_cptkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
ret = lenovo_probe_tp10ubkbd(hdev);
break;
default:
@@ -1083,6 +1157,7 @@ static void lenovo_remove(struct hid_device *hdev)
lenovo_remove_cptkbd(hdev);
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+ case USB_DEVICE_ID_LENOVO_X1_TAB:
lenovo_remove_tp10ubkbd(hdev);
break;
}
@@ -1122,6 +1197,12 @@ static const struct hid_device_id lenovo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TP10UBKBD) },
+ /*
+ * Note: bind to the HID_GROUP_GENERIC group so that we only bind to the
+ * keyboard part, letting hid-multitouch.c handle the touchpad and trackpoint.
+ */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ }
};
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 0dc7cdfc56f7..d40af911df63 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -568,22 +568,6 @@ static int lg_ultrax_remote_mapping(struct hid_input *hi,
return 1;
}
-static int lg_dinovo_mapping(struct hid_input *hi, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
- return 0;
-
- switch (usage->hid & HID_USAGE) {
-
- case 0x00d: lg_map_key_clear(KEY_MEDIA); break;
- default:
- return 0;
-
- }
- return 1;
-}
-
static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
unsigned long **bit, int *max)
{
@@ -668,10 +652,6 @@ static int lg_input_mapping(struct hid_device *hdev, struct hid_input *hi,
lg_ultrax_remote_mapping(hi, usage, bit, max))
return 1;
- if (hdev->product == USB_DEVICE_ID_DINOVO_MINI &&
- lg_dinovo_mapping(hi, usage, bit, max))
- return 1;
-
if ((drv_data->quirks & LG_WIRELESS) && lg_wireless_mapping(hi, usage, bit, max))
return 1;
@@ -879,10 +859,6 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP),
.driver_data = LG_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE),
- .driver_data = LG_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI),
- .driver_data = LG_DUPLICATE_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD),
.driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 271bd8d24339..fa835d565982 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -84,6 +84,7 @@
#define STD_MOUSE BIT(2)
#define MULTIMEDIA BIT(3)
#define POWER_KEYS BIT(4)
+#define KBD_MOUSE BIT(5)
#define MEDIA_CENTER BIT(8)
#define KBD_LEDS BIT(14)
/* Fake (bitnr > NUMBER_OF_HID_REPORTS) bit to track HID++ capability */
@@ -117,6 +118,7 @@ enum recvr_type {
recvr_type_mouse_only,
recvr_type_27mhz,
recvr_type_bluetooth,
+ recvr_type_dinovo,
};
struct dj_report {
@@ -333,6 +335,47 @@ static const char mse_bluetooth_descriptor[] = {
0xC0, /* END_COLLECTION */
};
+/* Mouse descriptor (5) for Bluetooth receiver, normal-res hwheel, 8 buttons */
+static const char mse5_bluetooth_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* Usage (Mouse) */
+ 0xa1, 0x01, /* Collection (Application) */
+ 0x85, 0x05, /* Report ID (5) */
+ 0x09, 0x01, /* Usage (Pointer) */
+ 0xa1, 0x00, /* Collection (Physical) */
+ 0x05, 0x09, /* Usage Page (Button) */
+ 0x19, 0x01, /* Usage Minimum (1) */
+ 0x29, 0x08, /* Usage Maximum (8) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop) */
+ 0x16, 0x01, 0xf8, /* Logical Minimum (-2047) */
+ 0x26, 0xff, 0x07, /* Logical Maximum (2047) */
+ 0x75, 0x0c, /* Report Size (12) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0x15, 0x81, /* Logical Minimum (-127) */
+ 0x25, 0x7f, /* Logical Maximum (127) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x09, 0x38, /* Usage (Wheel) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0x05, 0x0c, /* Usage Page (Consumer Devices) */
+ 0x0a, 0x38, 0x02, /* Usage (AC Pan) */
+ 0x15, 0x81, /* Logical Minimum (-127) */
+ 0x25, 0x7f, /* Logical Maximum (127) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0xc0, /* End Collection */
+ 0xc0, /* End Collection */
+};
+
/* Gaming Mouse descriptor (2) */
static const char mse_high_res_descriptor[] = {
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
@@ -480,6 +523,7 @@ static const char hidpp_descriptor[] = {
#define MAX_RDESC_SIZE \
(sizeof(kbd_descriptor) + \
sizeof(mse_bluetooth_descriptor) + \
+ sizeof(mse5_bluetooth_descriptor) + \
sizeof(consumer_descriptor) + \
sizeof(syscontrol_descriptor) + \
sizeof(media_descriptor) + \
@@ -517,6 +561,11 @@ static void delayedwork_callback(struct work_struct *work);
static LIST_HEAD(dj_hdev_list);
static DEFINE_MUTEX(dj_hdev_list_lock);
+static bool recvr_type_is_bluetooth(enum recvr_type type)
+{
+ return type == recvr_type_bluetooth || type == recvr_type_dinovo;
+}
+
/*
* dj/HID++ receivers are really a single logical entity, but for BIOS/Windows
* compatibility they have multiple USB interfaces. On HID++ receivers we need
@@ -534,7 +583,7 @@ static struct dj_receiver_dev *dj_find_receiver_dev(struct hid_device *hdev,
* The bluetooth receiver contains a built-in hub and has separate
* USB-devices for the keyboard and mouse interfaces.
*/
- sep = (type == recvr_type_bluetooth) ? '.' : '/';
+ sep = recvr_type_is_bluetooth(type) ? '.' : '/';
/* Try to find an already-probed interface from the same device */
list_for_each_entry(djrcv_dev, &dj_hdev_list, list) {
@@ -872,6 +921,14 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
* touchpad to work we must also forward mouse input reports to the dj_hiddev
* created for the keyboard (instead of forwarding them to a second paired
* device with a device_type of REPORT_TYPE_MOUSE as we normally would).
+ *
+ * On Dinovo receivers the keyboard's touchpad and an optional paired actual
+ * mouse send separate input reports, INPUT(2) aka STD_MOUSE for the mouse
+ * and INPUT(5) aka KBD_MOUSE for the keyboard's touchpad.
+ *
+ * On MX5x00 receivers (which can also be paired with a Dinovo keyboard)
+ * INPUT(2) is used for both an optional paired actual mouse and for the
+ * keyboard's touchpad.
*/
static const u16 kbd_builtin_touchpad_ids[] = {
0xb309, /* Dinovo Edge */
@@ -898,7 +955,10 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
id = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb;
for (i = 0; i < ARRAY_SIZE(kbd_builtin_touchpad_ids); i++) {
if (id == kbd_builtin_touchpad_ids[i]) {
- workitem->reports_supported |= STD_MOUSE;
+ if (djrcv_dev->type == recvr_type_dinovo)
+ workitem->reports_supported |= KBD_MOUSE;
+ else
+ workitem->reports_supported |= STD_MOUSE;
break;
}
}
@@ -1367,7 +1427,7 @@ static int logi_dj_ll_parse(struct hid_device *hid)
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
rdcat(rdesc, &rsize, mse_27mhz_descriptor,
sizeof(mse_27mhz_descriptor));
- else if (djdev->dj_receiver_dev->type == recvr_type_bluetooth)
+ else if (recvr_type_is_bluetooth(djdev->dj_receiver_dev->type))
rdcat(rdesc, &rsize, mse_bluetooth_descriptor,
sizeof(mse_bluetooth_descriptor));
else
@@ -1375,6 +1435,13 @@ static int logi_dj_ll_parse(struct hid_device *hid)
sizeof(mse_descriptor));
}
+ if (djdev->reports_supported & KBD_MOUSE) {
+ dbg_hid("%s: sending a kbd-mouse descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
+ rdcat(rdesc, &rsize, mse5_bluetooth_descriptor,
+ sizeof(mse5_bluetooth_descriptor));
+ }
+
if (djdev->reports_supported & MULTIMEDIA) {
dbg_hid("%s: sending a multimedia report descriptor: %llx\n",
__func__, djdev->reports_supported);
@@ -1692,6 +1759,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_mouse_only: no_dj_interfaces = 2; break;
case recvr_type_27mhz: no_dj_interfaces = 2; break;
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
+ case recvr_type_dinovo: no_dj_interfaces = 2; break;
}
if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
intf = to_usb_interface(hdev->dev.parent);
@@ -1857,23 +1925,27 @@ static void logi_dj_remove(struct hid_device *hdev)
}
static const struct hid_device_id logi_dj_receivers[] = {
- {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ { /* Logitech unifying receiver (0xc52b) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER),
.driver_data = recvr_type_dj},
- {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ { /* Logitech unifying receiver (0xc532) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
.driver_data = recvr_type_dj},
- { /* Logitech Nano mouse only receiver */
+
+ { /* Logitech Nano mouse only receiver (0xc52f) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
.driver_data = recvr_type_mouse_only},
- { /* Logitech Nano (non DJ) receiver */
+ { /* Logitech Nano (non DJ) receiver (0xc534) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
.driver_data = recvr_type_hidpp},
+
{ /* Logitech G700(s) receiver (0xc531) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc531),
+ USB_DEVICE_ID_LOGITECH_G700_RECEIVER),
.driver_data = recvr_type_gaming_hidpp},
{ /* Logitech G602 receiver (0xc537) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
@@ -1883,17 +1955,18 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech powerplay receiver (0xc53a) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc53f) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
+
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = recvr_type_27mhz},
- { /* Logitech powerplay receiver (0xc53a) */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
- .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_S510_RECEIVER_2),
@@ -1902,22 +1975,40 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER),
.driver_data = recvr_type_27mhz},
- { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. */
+
+ { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. (0xc70e) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc70e),
+ USB_DEVICE_ID_MX5000_RECEIVER_KBD_DEV),
.driver_data = recvr_type_bluetooth},
- { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. */
+ { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. (0xc70a) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc70a),
+ USB_DEVICE_ID_MX5000_RECEIVER_MOUSE_DEV),
.driver_data = recvr_type_bluetooth},
- { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. */
+ { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. (0xc71b) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc71b),
+ USB_DEVICE_ID_MX5500_RECEIVER_KBD_DEV),
.driver_data = recvr_type_bluetooth},
- { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. */
+ { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. (0xc71c) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- 0xc71c),
+ USB_DEVICE_ID_MX5500_RECEIVER_MOUSE_DEV),
.driver_data = recvr_type_bluetooth},
+
+ { /* Logitech Dinovo Edge HID++ / bluetooth receiver keyboard intf. (0xc713) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_KBD_DEV),
+ .driver_data = recvr_type_dinovo},
+ { /* Logitech Dinovo Edge HID++ / bluetooth receiver mouse intf. (0xc714) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_MOUSE_DEV),
+ .driver_data = recvr_type_dinovo},
+ { /* Logitech DiNovo Mini HID++ / bluetooth receiver keyboard intf. (0xc71e) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_MINI_RECEIVER_KBD_DEV),
+ .driver_data = recvr_type_dinovo},
+ { /* Logitech DiNovo Mini HID++ / bluetooth receiver mouse intf. (0xc71f) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_DINOVO_MINI_RECEIVER_MOUSE_DEV),
+ .driver_data = recvr_type_dinovo},
{}
};
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d459e2dbe647..d598094dadd0 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -261,7 +261,7 @@ static int __hidpp_send_report(struct hid_device *hdev,
return ret == fields_count ? 0 : -1;
}
-/**
+/*
* hidpp_send_message_sync() returns 0 in case of success, and something else
* in case of a failure.
* - If ' something else' is positive, that means that an error has been raised
@@ -423,7 +423,7 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
(report->rap.sub_id == 0x41));
}
-/**
+/*
* hidpp_prefix_name() prefixes the current given name with "Logitech ".
*/
static void hidpp_prefix_name(char **name, int name_length)
@@ -454,6 +454,7 @@ static void hidpp_prefix_name(char **name, int name_length)
* hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
* events given a high-resolution wheel
* movement.
+ * @input_dev: Pointer to the input device
* @counter: a hid_scroll_counter struct describing the wheel.
* @hi_res_value: the movement of the wheel, in the mouse's high-resolution
* units.
@@ -1884,7 +1885,7 @@ struct hidpp_touchpad_fw_items {
uint8_t persistent;
};
-/**
+/*
* send a set state command to the device by reading the current items->state
* field. items is then filled with the current state.
*/
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index abd86903875f..2bb473d8c424 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -16,6 +16,7 @@
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "hid-ids.h"
@@ -54,6 +55,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD2_USB_REPORT_ID 0x02
#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
+#define MOUSE2_REPORT_ID 0x12
#define DOUBLE_REPORT_ID 0xf7
/* These definitions are not precise, but they're close enough. (Bits
* 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
@@ -127,6 +129,9 @@ struct magicmouse_sc {
u8 size;
} touches[16];
int tracking_ids[16];
+
+ struct hid_device *hdev;
+ struct delayed_work work;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -195,7 +200,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
int id, x, y, size, orientation, touch_major, touch_minor, state, down;
int pressure = 0;
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
@@ -296,7 +302,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_PRESSURE, pressure);
if (report_undeciphered) {
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
@@ -380,6 +387,34 @@ static int magicmouse_raw_event(struct hid_device *hdev,
* ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
*/
break;
+ case MOUSE2_REPORT_ID:
+ /* Size is either 8 or (14 + 8 * N) */
+ if (size != 8 && (size < 14 || (size - 14) % 8 != 0))
+ return 0;
+ npoints = (size - 14) / 8;
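+ /* e.g. a 30-byte report carries (30 - 14) / 8 = 2 touch records */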
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for MOUSE2_REPORT_ID\n",
+ size);
+ return 0;
+ }
+ msc->ntouches = 0;
+ for (ii = 0; ii < npoints; ii++)
+ magicmouse_emit_touch(msc, ii, data + ii * 8 + 14);
+
+ /* When emulating three-button mode, it is important
+ * to have the current touch information before
+ * generating a click event.
+ */
+ x = (int)((data[3] << 24) | (data[2] << 16)) >> 16;
+ y = (int)((data[5] << 24) | (data[4] << 16)) >> 16;
+ clicks = data[1];
+
+ /* The following bits provide a device specific timestamp. They
+ * are unused here.
+ *
+ * ts = data[11] >> 6 | data[12] << 2 | data[13] << 10;
+ */
+ break;
case DOUBLE_REPORT_ID:
/* Sometimes the trackpad sends two touch reports in one
* packet.
@@ -392,7 +427,8 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 0;
}
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -408,6 +444,23 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 1;
}
+static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+ if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ field->report->id == MOUSE2_REPORT_ID) {
+ /*
+ * magicmouse_raw_event has done all the work. Skip hidinput.
+ *
+ * Specifically, hidinput may modify BTN_LEFT and BTN_RIGHT,
+ * breaking emulate_3button.
+ */
+ return 1;
+ }
+ return 0;
+}
+
static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
int error;
@@ -415,7 +468,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_KEY, input->evbit);
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
if (emulate_3button)
@@ -480,7 +534,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* the origin at the same position, and just uses the additive
* inverse of the reported Y.
*/
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
@@ -580,19 +635,60 @@ static int magicmouse_input_configured(struct hid_device *hdev,
return 0;
}
-
-static int magicmouse_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
+static int magicmouse_enable_multitouch(struct hid_device *hdev)
{
const u8 *feature;
const u8 feature_mt[] = { 0xD7, 0x01 };
+ const u8 feature_mt_mouse2[] = { 0xF1, 0x02, 0x01 };
const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
u8 *buf;
+ int ret;
+ int feature_size;
+
+ if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_bt);
+ feature = feature_mt_trackpad2_bt;
+ } else { /* USB_VENDOR_ID_APPLE */
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
+ }
+ } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ feature_size = sizeof(feature_mt_mouse2);
+ feature = feature_mt_mouse2;
+ } else {
+ feature_size = sizeof(feature_mt);
+ feature = feature_mt;
+ }
+
+ buf = kmemdup(feature, feature_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
+ return ret;
+}
+
+static void magicmouse_enable_mt_work(struct work_struct *work)
+{
+ struct magicmouse_sc *msc =
+ container_of(work, struct magicmouse_sc, work.work);
+ int ret;
+
+ ret = magicmouse_enable_multitouch(msc->hdev);
+ if (ret < 0)
+ hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
+}
+
+static int magicmouse_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
- int feature_size;
if (id->vendor == USB_VENDOR_ID_APPLE &&
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
@@ -606,6 +702,8 @@ static int magicmouse_probe(struct hid_device *hdev,
}
msc->scroll_accel = SCROLL_ACCEL_DEFAULT;
+ msc->hdev = hdev;
+ INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);
msc->quirks = id->driver_data;
hid_set_drvdata(hdev, msc);
@@ -631,6 +729,9 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID, 0);
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ MOUSE2_REPORT_ID, 0);
else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
if (id->vendor == BT_VENDOR_ID_APPLE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -652,25 +753,6 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
- if (id->vendor == BT_VENDOR_ID_APPLE) {
- feature_size = sizeof(feature_mt_trackpad2_bt);
- feature = feature_mt_trackpad2_bt;
- } else { /* USB_VENDOR_ID_APPLE */
- feature_size = sizeof(feature_mt_trackpad2_usb);
- feature = feature_mt_trackpad2_usb;
- }
- } else {
- feature_size = sizeof(feature_mt);
- feature = feature_mt;
- }
-
- buf = kmemdup(feature, feature_size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err_stop_hw;
- }
-
/*
* Some devices respond with 'invalid report id' when feature
* report switching it into multitouch mode is sent to it.
@@ -679,13 +761,14 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- kfree(buf);
- if (ret != -EIO && ret != feature_size) {
+ ret = magicmouse_enable_multitouch(hdev);
+ if (ret != -EIO && ret < 0) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
+ if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
return 0;
err_stop_hw:
@@ -693,9 +776,18 @@ err_stop_hw:
return ret;
}
+static void magicmouse_remove(struct hid_device *hdev)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+ /* defensively tolerate a missing drvdata on teardown */
+ if (msc)
+ cancel_delayed_work_sync(&msc->work);
+
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id magic_mice[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
@@ -710,7 +802,9 @@ static struct hid_driver magicmouse_driver = {
.name = "magicmouse",
.id_table = magic_mice,
.probe = magicmouse_probe,
+ .remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
+ .event = magicmouse_event,
.input_mapping = magicmouse_input_mapping,
.input_configured = magicmouse_input_configured,
};
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 1b5c63241af0..bbda231a7ce3 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -329,7 +329,6 @@ static int picolcd_raw_event(struct hid_device *hdev,
{
struct picolcd_data *data = hid_get_drvdata(hdev);
unsigned long flags;
- int ret = 0;
if (!data)
return 1;
@@ -342,9 +341,9 @@ static int picolcd_raw_event(struct hid_device *hdev,
if (report->id == REPORT_KEY_STATE) {
if (data->input_keys)
- ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+ picolcd_raw_keypad(data, report, raw_data+1, size-1);
} else if (report->id == REPORT_IR_DATA) {
- ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+ picolcd_raw_cir(data, report, raw_data+1, size-1);
} else {
spin_lock_irqsave(&data->lock, flags);
/*
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 85b685efc12f..e81b7cec2d12 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -13,6 +13,7 @@
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/jiffies.h>
#define PLT_HID_1_0_PAGE 0xffa00000
#define PLT_HID_2_0_PAGE 0xffa20000
@@ -36,6 +37,16 @@
#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+
+#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+
+struct plt_drv_data {
+ unsigned long device_type;
+ unsigned long last_volume_key_ts;
+ u32 quirks;
+};
+
static int plantronics_input_mapping(struct hid_device *hdev,
struct hid_input *hi,
struct hid_field *field,
@@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned long **bit, int *max)
{
unsigned short mapped_key;
- unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
+ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+ unsigned long plt_type = drv_data->device_type;
/* special case for PTT products */
if (field->application == HID_GD_JOYSTICK)
@@ -105,6 +117,30 @@ mapped:
return 1;
}
+static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
+ unsigned long prev_ts, cur_ts;
+
+ /* Usages are filtered in plantronics_usages. */
+
+ if (!value) /* Handle key presses only. */
+ return 0;
+
+ prev_ts = drv_data->last_volume_key_ts;
+ cur_ts = jiffies;
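+ /* Unsigned subtraction keeps the comparison safe across jiffies wraparound. */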
+ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
+ return 1; /* Ignore the repeated key. */
+
+ drv_data->last_volume_key_ts = cur_ts;
+ }
+
+ return 0;
+}
+
static unsigned long plantronics_device_type(struct hid_device *hdev)
{
unsigned i, col_page;
@@ -133,15 +169,24 @@ exit:
static int plantronics_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
+ struct plt_drv_data *drv_data;
int ret;
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err;
}
- hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
+ drv_data->device_type = plantronics_device_type(hdev);
+ drv_data->quirks = id->driver_data;
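+ /* Pre-date the timestamp so an initial key press is not treated as a repeat. */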
+ drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+
+ hid_set_drvdata(hdev, drv_data);
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
@@ -153,15 +198,26 @@ err:
}
static const struct hid_device_id plantronics_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, plantronics_devices);
+static const struct hid_usage_id plantronics_usages[] = {
+ { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
+ { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
+ { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
+};
+
static struct hid_driver plantronics_driver = {
.name = "plantronics",
.id_table = plantronics_devices,
+ .usage_table = plantronics_usages,
.input_mapping = plantronics_input_mapping,
+ .event = plantronics_event,
.probe = plantronics_probe,
};
module_hid_driver(plantronics_driver);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 1a9daf03dbfa..3dd6f15f2a67 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -445,8 +445,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
@@ -661,6 +659,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
#endif
+#if IS_ENABLED(CONFIG_HID_TMINIT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65d) },
+#endif
#if IS_ENABLED(CONFIG_HID_TIVO)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 2628bc53ed80..2e6662173a79 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -397,15 +397,14 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
if (!strncmp(name, "value", strlen("value"))) {
u32 report_id;
- int ret;
if (kstrtoint(buf, 0, &value) != 0)
return -EINVAL;
report_id = sensor_inst->fields[field_index].attribute.
report_id;
- ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
- index, sizeof(value), &value);
+ sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+ index, sizeof(value), &value);
} else
return -EINVAL;
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 3dd7d3246737..95cf88f3bafb 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -18,7 +18,6 @@
/**
* struct sensor_hub_data - Hold a instance data for a HID hub device
- * @hsdev: Stored hid instance for current hub device.
* @mutex: Mutex to serialize synchronous request.
* @lock: Spin lock to protect pending request structure.
* @dyn_callback_list: Holds callback function
@@ -34,7 +33,6 @@ struct sensor_hub_data {
spinlock_t dyn_callback_lock;
struct mfd_cell *hid_sensor_hub_client_devs;
int hid_sensor_client_cnt;
- unsigned long quirks;
int ref_cnt;
};
@@ -42,6 +40,7 @@ struct sensor_hub_data {
* struct hid_sensor_hub_callbacks_list - Stores callback list
* @list: list head.
* @usage_id: usage id for a physical device.
+ * @hsdev: Stored hid instance for current hub device.
* @usage_callback: Stores registered callback functions.
* @priv: Private data for a physical device.
*/
@@ -615,7 +614,6 @@ static int sensor_hub_probe(struct hid_device *hdev,
}
hid_set_drvdata(hdev, sd);
- sd->quirks = id->driver_data;
spin_lock_init(&sd->lock);
spin_lock_init(&sd->dyn_callback_lock);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
new file mode 100644
index 000000000000..2e452c6e8ef4
--- /dev/null
+++ b/drivers/hid/hid-thrustmaster.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * When connected to the machine, the Thrustmaster wheels appear as
+ * a «generic» hid gamepad called "Thrustmaster FFB Wheel".
+ *
+ * In this mode not all of the wheel's functionality, such as force
+ * feedback, is available. To enable the full feature set of a Thrustmaster
+ * wheel we have to send it a specific USB CONTROL request with a code that
+ * differs for each wheel.
+ *
+ * This driver tries to understand which model of Thrustmaster wheel the generic
+ * "Thrustmaster FFB Wheel" really is and then sends the appropriate control code.
+ *
+ * Copyright (c) 2020-2021 Dario Pagani <dario.pagani.146+linuxk@gmail.com>
+ * Copyright (c) 2020-2021 Kim Kuparinen <kimi.h.kuparinen@gmail.com>
+ */
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/*
+ * These interrupts are used to prevent a nasty crash when initializing the
+ * T300RS. Used in thrustmaster_interrupts().
+ */
+static const u8 setup_0[] = { 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_1[] = { 0x0a, 0x04, 0x90, 0x03, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_2[] = { 0x0a, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_3[] = { 0x0a, 0x04, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00 };
+static const u8 setup_4[] = { 0x0a, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00 };
+static const u8 *const setup_arr[] = { setup_0, setup_1, setup_2, setup_3, setup_4 };
+static const unsigned int setup_arr_sizes[] = {
+ ARRAY_SIZE(setup_0),
+ ARRAY_SIZE(setup_1),
+ ARRAY_SIZE(setup_2),
+ ARRAY_SIZE(setup_3),
+ ARRAY_SIZE(setup_4)
+};
+/*
+ * This struct describes a single type of Thrustmaster wheel.
+ *
+ * Note: The values are stored in CPU endianness; the USB protocol
+ * always uses little endian, so the cpu_to_le[BIT]() macros must be
+ * used when preparing USB packets and vice versa.
+ */
+struct tm_wheel_info {
+ uint16_t wheel_type;
+
+ /*
+ * See when the USB control out packet is prepared...
+ * @TODO The TMX seems to require multiple control codes to switch.
+ */
+ uint16_t switch_value;
+
+ char const *const wheel_name;
+};
+
+/*
+ * Known wheels.
+ * Note: TMX does not work as it requires 2 control packets
+ */
+static const struct tm_wheel_info tm_wheels_infos[] = {
+ {0x0306, 0x0006, "Thrustmaster T150RS"},
+ {0x0206, 0x0005, "Thrustmaster T300RS"},
+ {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
+ {0x0002, 0x0002, "Thrustmaster T500RS"}
+ //{0x0407, 0x0001, "Thrustmaster TMX"}
+};
+
+static const uint8_t tm_wheels_infos_length = 4;
+
+/*
+ * This struct contains (in little endian) the response data
+ * of the wheel to request 73.
+ *
+ * Sufficient research to understand what each field does has not
+ * been conducted yet. The position and meaning of the fields are
+ * just a very optimistic guess based on instinct.
+ */
+struct __packed tm_wheel_response
+{
+ /*
+ * Seems to be the type of packet
+ * - 0x0049 if is data.a (15 bytes)
+ * - 0x0047 if is data.b (7 bytes)
+ */
+ uint16_t type;
+
+ union {
+ struct __packed {
+ uint16_t field0;
+ uint16_t field1;
+ /*
+ * Seems to be the model code of the wheel
+ * Read table thrustmaster_wheels to values
+ */
+ uint16_t model;
+
+ uint16_t field2;
+ uint16_t field3;
+ uint16_t field4;
+ uint16_t field5;
+ } a;
+ struct __packed {
+ uint16_t field0;
+ uint16_t field1;
+ uint16_t model;
+ } b;
+ } data;
+};
+
+struct tm_wheel {
+ struct usb_device *usb_dev;
+ struct urb *urb;
+
+ struct usb_ctrlrequest *model_request;
+ struct tm_wheel_response *response;
+
+ struct usb_ctrlrequest *change_request;
+};
+
+/* The control packet to send to wheel */
+static const struct usb_ctrlrequest model_request = {
+ .bRequestType = 0xc1,
+ .bRequest = 73,
+ .wValue = 0,
+ .wIndex = 0,
+ .wLength = cpu_to_le16(0x0010)
+};
+
+static const struct usb_ctrlrequest change_request = {
+ .bRequestType = 0x41,
+ .bRequest = 83,
+ .wValue = 0, // Will be filled by the driver
+ .wIndex = 0,
+ .wLength = 0
+};
+
+/*
+ * On some setups initializing the T300RS crashes the kernel,
+ * these interrupts fix that particular issue. So far they haven't caused any
+ * adverse effects in other wheels.
+ */
+static void thrustmaster_interrupts(struct hid_device *hdev)
+{
+ int ret, trans, i, b_ep;
+ u8 *send_buf = kmalloc(256, GFP_KERNEL);
+ struct usb_host_endpoint *ep;
+ struct device *dev = &hdev->dev;
+ struct usb_interface *usbif = to_usb_interface(dev->parent);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+
+ if (!send_buf) {
+ hid_err(hdev, "failed allocating send buffer\n");
+ return;
+ }
+
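+ /* The interface's second endpoint is used as the interrupt-out pipe. */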
+ ep = &usbif->cur_altsetting->endpoint[1];
+ b_ep = ep->desc.bEndpointAddress;
+
+ for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
+ memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
+
+ ret = usb_interrupt_msg(usbdev,
+ usb_sndintpipe(usbdev, b_ep),
+ send_buf,
+ setup_arr_sizes[i],
+ &trans,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (ret) {
+ hid_err(hdev, "setup data couldn't be sent\n");
+ kfree(send_buf);
+ return;
+ }
+ }
+
+ kfree(send_buf);
+}
+
+static void thrustmaster_change_handler(struct urb *urb)
+{
+ struct hid_device *hdev = urb->context;
+
+ // The wheel seems to kill itself before answering the host, thereby violating the USB protocol...
+ if (urb->status == 0 || urb->status == -EPROTO || urb->status == -EPIPE)
+ hid_info(hdev, "Success?! The wheel should have been initialized!\n");
+ else
+ hid_warn(hdev, "URB to change wheel mode seems to have failed with error %d\n", urb->status);
+}
+
+/*
+ * Called by the USB subsystem when the wheel responds to our request
+ * to get [what seems to be] the wheel's model.
+ *
+ * If the model id is recognized, we send the appropriate USB CONTROL REQUEST
+ * to switch the wheel to its full capabilities.
+ */
+static void thrustmaster_model_handler(struct urb *urb)
+{
+ struct hid_device *hdev = urb->context;
+ struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
+ uint16_t model = 0;
+ int i, ret;
+ const struct tm_wheel_info *twi = NULL;
+
+ if (urb->status) {
+ hid_err(hdev, "URB to get model id failed with error %d\n", urb->status);
+ return;
+ }
+
+ if (tm_wheel->response->type == cpu_to_le16(0x49))
+ model = le16_to_cpu(tm_wheel->response->data.a.model);
+ else if (tm_wheel->response->type == cpu_to_le16(0x47))
+ model = le16_to_cpu(tm_wheel->response->data.b.model);
+ else {
+ hid_err(hdev, "Unknown packet type 0x%x, unable to proceed further with wheel init\n", tm_wheel->response->type);
+ return;
+ }
+
+ for (i = 0; i < tm_wheels_infos_length && !twi; i++)
+ if (tm_wheels_infos[i].wheel_type == model)
+ twi = tm_wheels_infos + i;
+
+ if (twi)
+ hid_info(hdev, "Wheel with model id 0x%x is a %s\n", model, twi->wheel_name);
+ else {
+ hid_err(hdev, "Unknown wheel's model id 0x%x, unable to proceed further with wheel init\n", model);
+ return;
+ }
+
+ tm_wheel->change_request->wValue = cpu_to_le16(twi->switch_value);
+ usb_fill_control_urb(
+ tm_wheel->urb,
+ tm_wheel->usb_dev,
+ usb_sndctrlpipe(tm_wheel->usb_dev, 0),
+ (char *)tm_wheel->change_request,
+ NULL, 0, // We do not expect any response from the wheel
+ thrustmaster_change_handler,
+ hdev
+ );
+
+ ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
+ if (ret)
+ hid_err(hdev, "Error %d while submitting the change URB. I am unable to initialize this wheel...\n", ret);
+}
+
+static void thrustmaster_remove(struct hid_device *hdev)
+{
+ struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
+
+ usb_kill_urb(tm_wheel->urb);
+
+ kfree(tm_wheel->response);
+ kfree(tm_wheel->model_request);
+ usb_free_urb(tm_wheel->urb);
+ kfree(tm_wheel);
+
+ hid_hw_stop(hdev);
+}
+
+/*
+ * Function called by HID when a Thrustmaster FFB wheel is connected to the host.
+ * This function starts the hid device, tries to allocate the tm_wheel data
+ * structure and finally sends a USB CONTROL REQUEST to the wheel to get [what
+ * seems to be] its model type.
+ */
+static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret = 0;
+ struct tm_wheel *tm_wheel = NULL;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed with error %d\n", ret);
+ goto error0;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed with error %d\n", ret);
+ goto error0;
+ }
+
+ // Now we allocate the tm_wheel
+ tm_wheel = kzalloc(sizeof(struct tm_wheel), GFP_KERNEL);
+ if (!tm_wheel) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ tm_wheel->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!tm_wheel->urb) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+
+ tm_wheel->model_request = kmemdup(&model_request,
+ sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (!tm_wheel->model_request) {
+ ret = -ENOMEM;
+ goto error3;
+ }
+
+ tm_wheel->response = kzalloc(sizeof(struct tm_wheel_response), GFP_KERNEL);
+ if (!tm_wheel->response) {
+ ret = -ENOMEM;
+ goto error4;
+ }
+
+ tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!tm_wheel->change_request) {
+ ret = -ENOMEM;
+ goto error5;
+ }
+ memcpy(tm_wheel->change_request, &change_request, sizeof(struct usb_ctrlrequest));
+
+ tm_wheel->usb_dev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
+ hid_set_drvdata(hdev, tm_wheel);
+
+ thrustmaster_interrupts(hdev);
+
+ usb_fill_control_urb(
+ tm_wheel->urb,
+ tm_wheel->usb_dev,
+ usb_rcvctrlpipe(tm_wheel->usb_dev, 0),
+ (char *)tm_wheel->model_request,
+ tm_wheel->response,
+ sizeof(struct tm_wheel_response),
+ thrustmaster_model_handler,
+ hdev
+ );
+
+ ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
+ if (ret)
+ hid_err(hdev, "Error %d while submitting the URB. I am unable to initialize this wheel...\n", ret);
+
+ return ret;
+
+error5: kfree(tm_wheel->response);
+error4: kfree(tm_wheel->model_request);
+error3: usb_free_urb(tm_wheel->urb);
+error2: kfree(tm_wheel);
+error1: hid_hw_stop(hdev);
+error0:
+ return ret;
+}
+
+static const struct hid_device_id thrustmaster_devices[] = {
+ { HID_USB_DEVICE(0x044f, 0xb65d)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, thrustmaster_devices);
+
+static struct hid_driver thrustmaster_driver = {
+ .name = "hid-thrustmaster",
+ .id_table = thrustmaster_devices,
+ .probe = thrustmaster_probe,
+ .remove = thrustmaster_remove,
+};
+
+module_hid_driver(thrustmaster_driver);
+
+MODULE_AUTHOR("Dario Pagani <dario.pagani.146+linuxk@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver to initialize some steering wheel joysticks from Thrustmaster");
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 6af25c38b9cc..3d67b748a3b9 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -21,7 +21,8 @@
#include <asm/unaligned.h>
/**
- * Convert a pen in-range reporting type to a string.
+ * uclogic_params_pen_inrange_to_str() - Convert a pen in-range reporting type
+ * to a string.
*
* @inrange: The in-range reporting type to convert.
*
@@ -516,7 +517,8 @@ void uclogic_params_cleanup(struct uclogic_params *params)
}
/**
- * Get a replacement report descriptor for a tablet's interface.
+ * uclogic_params_get_desc() - Get a replacement report descriptor for a
+ * tablet's interface.
*
* @params: The parameters of a tablet interface to get report
* descriptor for. Cannot be NULL.
@@ -689,7 +691,7 @@ static void uclogic_params_init_with_pen_unused(struct uclogic_params *params)
}
/**
- * uclogic_params_init() - initialize a Huion tablet interface and discover
+ * uclogic_params_huion_init() - initialize a Huion tablet interface and discover
* its parameters.
*
* @params: Parameters to fill in (to be cleaned with
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index bf5da6de7bba..6dd6dcd09c8b 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -641,7 +641,7 @@ const __u8 uclogic_rdesc_pen_v2_template_arr[] = {
const size_t uclogic_rdesc_pen_v2_template_size =
sizeof(uclogic_rdesc_pen_v2_template_arr);
-/**
+/*
* Expand to the contents of a generic buttonpad report descriptor.
*
* @_padding: Padding from the end of button bits at bit 44, until
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index bb8c00e6be78..a6f0257a26de 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -25,12 +25,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
+#include <linux/uuid.h>
#include "i2c-hid.h"
struct i2c_hid_acpi {
struct i2chid_ops ops;
- struct i2c_client *client;
+ struct acpi_device *adev;
};
static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
@@ -42,29 +43,24 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
{ },
};
-static int i2c_hid_acpi_get_descriptor(struct i2c_client *client)
+/* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */
+static guid_t i2c_hid_guid =
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+
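+/* _DSM function 1 (rev 1) on this GUID returns the HID descriptor register address. */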
+static int i2c_hid_acpi_get_descriptor(struct acpi_device *adev)
{
- static guid_t i2c_hid_guid =
- GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
- 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+ acpi_handle handle = acpi_device_handle(adev);
union acpi_object *obj;
- struct acpi_device *adev;
- acpi_handle handle;
u16 hid_descriptor_address;
- handle = ACPI_HANDLE(&client->dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_err(&client->dev, "Error could not get ACPI device\n");
- return -ENODEV;
- }
-
if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
return -ENODEV;
obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
- dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
+ acpi_handle_err(handle, "Error _DSM call to get HID descriptor address failed\n");
return -ENODEV;
}
@@ -76,14 +72,12 @@ static int i2c_hid_acpi_get_descriptor(struct i2c_client *client)
static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops)
{
- struct i2c_hid_acpi *ihid_acpi =
- container_of(ops, struct i2c_hid_acpi, ops);
- struct device *dev = &ihid_acpi->client->dev;
- acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
+ struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops);
+
+ acpi_device_set_power(ihid_acpi->adev, ACPI_STATE_D3_COLD);
}
-static int i2c_hid_acpi_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+static int i2c_hid_acpi_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_hid_acpi *ihid_acpi;
@@ -91,21 +85,25 @@ static int i2c_hid_acpi_probe(struct i2c_client *client,
u16 hid_descriptor_address;
int ret;
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+
ihid_acpi = devm_kzalloc(&client->dev, sizeof(*ihid_acpi), GFP_KERNEL);
if (!ihid_acpi)
return -ENOMEM;
- ihid_acpi->client = client;
+ ihid_acpi->adev = adev;
ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
- ret = i2c_hid_acpi_get_descriptor(client);
+ ret = i2c_hid_acpi_get_descriptor(adev);
if (ret < 0)
return ret;
hid_descriptor_address = ret;
- adev = ACPI_COMPANION(dev);
- if (adev)
- acpi_device_fix_up_power(adev);
+ acpi_device_fix_up_power(adev);
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
device_set_wakeup_capable(dev, true);
@@ -128,10 +126,10 @@ static struct i2c_driver i2c_hid_acpi_driver = {
.name = "i2c_hid_acpi",
.pm = &i2c_hid_core_pm,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+ .acpi_match_table = i2c_hid_acpi_match,
},
- .probe = i2c_hid_acpi_probe,
+ .probe_new = i2c_hid_acpi_probe,
.remove = i2c_hid_core_remove,
.shutdown = i2c_hid_core_shutdown,
};
diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
new file mode 100644
index 000000000000..7ce9b5d641eb
--- /dev/null
+++ b/drivers/hid/surface-hid/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0+
+menu "Surface System Aggregator Module HID support"
+ depends on SURFACE_AGGREGATOR
+ depends on INPUT
+
+config SURFACE_HID
+ tristate "HID transport driver for Surface System Aggregator Module"
+ depends on SURFACE_AGGREGATOR_REGISTRY
+ select SURFACE_HID_CORE
+ help
+ Driver to support integrated HID devices on newer Microsoft Surface
+ models.
+
+ This driver provides support for the HID transport protocol provided
+ by the Surface Aggregator Module (i.e. the embedded controller) on
+ 7th-generation Microsoft Surface devices, i.e. Surface Book 3 and
+ Surface Laptop 3. On those models, it is mainly used to connect the
+ integrated touchpad and keyboard.
+
+ Say M or Y here, if you want support for integrated HID devices, i.e.
+ integrated touchpad and keyboard, on 7th generation Microsoft Surface
+ models.
+
+config SURFACE_KBD
+ tristate "HID keyboard transport driver for Surface System Aggregator Module"
+ select SURFACE_HID_CORE
+ help
+ Driver to support HID keyboards on Surface Laptop 1 and 2 devices.
+
+ This driver provides support for the HID transport protocol provided
+ by the Surface Aggregator Module (i.e. the embedded controller) on
+ Microsoft Surface Laptops 1 and 2. It is used to connect the
+ integrated keyboard on those devices.
+
+ Say M or Y here, if you want support for the integrated keyboard on
+ Microsoft Surface Laptops 1 and 2.
+
+endmenu
+
+config SURFACE_HID_CORE
+ tristate
+ select HID
diff --git a/drivers/hid/surface-hid/Makefile b/drivers/hid/surface-hid/Makefile
new file mode 100644
index 000000000000..4ae11cf09b25
--- /dev/null
+++ b/drivers/hid/surface-hid/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile - Surface System Aggregator Module (SSAM) HID transport driver.
+#
+obj-$(CONFIG_SURFACE_HID_CORE) += surface_hid_core.o
+obj-$(CONFIG_SURFACE_HID) += surface_hid.o
+obj-$(CONFIG_SURFACE_KBD) += surface_kbd.o
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
new file mode 100644
index 000000000000..3477b31611ae
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_hid.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) HID transport driver for the
+ * generic HID interface (HID/TC=0x15 subsystem). Provides support for
+ * integrated HID devices on Surface Laptop 3, Book 3, and later.
+ *
+ * Copyright (C) 2019-2021 Blaž Hrastnik <blaz@mxxn.io>,
+ * Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+#include "surface_hid_core.h"
+
+
+/* -- SAM interface. -------------------------------------------------------- */
+
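+/*
+ * Descriptors are read in slices: the host writes the desired (offset,
+ * length) window into the slice header and the device responds with the
+ * window it actually filled, setting 'end' on the final slice.
+ */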
+struct surface_hid_buffer_slice {
+ __u8 entry;
+ __le32 offset;
+ __le32 length;
+ __u8 end;
+ __u8 data[];
+} __packed;
+
+static_assert(sizeof(struct surface_hid_buffer_slice) == 10);
+
+enum surface_hid_cid {
+ SURFACE_HID_CID_OUTPUT_REPORT = 0x01,
+ SURFACE_HID_CID_GET_FEATURE_REPORT = 0x02,
+ SURFACE_HID_CID_SET_FEATURE_REPORT = 0x03,
+ SURFACE_HID_CID_GET_DESCRIPTOR = 0x04,
+};
+
+static int ssam_hid_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
+{
+ u8 buffer[sizeof(struct surface_hid_buffer_slice) + 0x76];
+ struct surface_hid_buffer_slice *slice;
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ u32 buffer_len, offset, length;
+ int status;
+
+ /*
+ * Note: The 0x76 above has been chosen because that's what's used by
+ * the Windows driver. Together with the header, this leads to a 128
+ * byte payload in total.
+ */
+
+ buffer_len = ARRAY_SIZE(buffer) - sizeof(struct surface_hid_buffer_slice);
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_HID_CID_GET_DESCRIPTOR;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(struct surface_hid_buffer_slice);
+ rqst.payload = buffer;
+
+ rsp.capacity = ARRAY_SIZE(buffer);
+ rsp.pointer = buffer;
+
+ slice = (struct surface_hid_buffer_slice *)buffer;
+ slice->entry = entry;
+ slice->end = 0;
+
+ offset = 0;
+ length = buffer_len;
+
+ while (!slice->end && offset < len) {
+ put_unaligned_le32(offset, &slice->offset);
+ put_unaligned_le32(length, &slice->length);
+
+ rsp.length = 0;
+
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
+ sizeof(*slice));
+ if (status)
+ return status;
+
+ offset = get_unaligned_le32(&slice->offset);
+ length = get_unaligned_le32(&slice->length);
+
+ /* Don't mess stuff up in case we receive garbage. */
+ if (length > buffer_len || offset > len)
+ return -EPROTO;
+
+ if (offset + length > len)
+ length = len - offset;
+
+ memcpy(buf + offset, &slice->data[0], length);
+
+ offset += length;
+ length = buffer_len;
+ }
+
+ if (offset != len) {
+ dev_err(shid->dev, "unexpected descriptor length: got %u, expected %zu\n",
+ offset, len);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_hid_set_raw_report(struct surface_hid_device *shid, u8 rprt_id, bool feature,
+ u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ u8 cid;
+
+ if (feature)
+ cid = SURFACE_HID_CID_SET_FEATURE_REPORT;
+ else
+ cid = SURFACE_HID_CID_OUTPUT_REPORT;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.instance_id = shid->uid.instance;
+ rqst.command_id = cid;
+ rqst.flags = 0;
+ rqst.length = len;
+ rqst.payload = buf;
+
+ buf[0] = rprt_id;
+
+ return ssam_retry(ssam_request_sync, shid->ctrl, &rqst, NULL);
+}
+
+static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.instance_id = shid->uid.instance;
+ rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
+ rqst.flags = 0;
+ rqst.length = sizeof(rprt_id);
+ rqst.payload = &rprt_id;
+
+ rsp.capacity = len;
+ rsp.length = 0;
+ rsp.pointer = buf;
+
+ return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(rprt_id));
+}
+
+static u32 ssam_hid_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
+
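+ /* Only events with command id 0x00 carry HID input reports. */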
+ if (event->command_id != 0x00)
+ return 0;
+
+ hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- Transport driver. ----------------------------------------------------- */
+
+static int shid_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssam_hid_set_raw_report(shid, rprt_id, false, buf, len);
+ return status >= 0 ? len : status;
+}
+
+static int shid_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssam_hid_get_raw_report(shid, rprt_id, buf, len);
+ return status >= 0 ? len : status;
+}
+
+static int shid_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int status;
+
+ status = ssam_hid_set_raw_report(shid, rprt_id, true, buf, len);
+ return status >= 0 ? len : status;
+}
+
+
+/* -- Driver setup. --------------------------------------------------------- */
+
+static int surface_hid_probe(struct ssam_device *sdev)
+{
+ struct surface_hid_device *shid;
+
+ shid = devm_kzalloc(&sdev->dev, sizeof(*shid), GFP_KERNEL);
+ if (!shid)
+ return -ENOMEM;
+
+ shid->dev = &sdev->dev;
+ shid->ctrl = sdev->ctrl;
+ shid->uid = sdev->uid;
+
+ shid->notif.base.priority = 1;
+ shid->notif.base.fn = ssam_hid_event_fn;
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
+ shid->notif.event.id.target_category = sdev->uid.category;
+ shid->notif.event.id.instance = sdev->uid.instance;
+ shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
+ shid->notif.event.flags = 0;
+
+ shid->ops.get_descriptor = ssam_hid_get_descriptor;
+ shid->ops.output_report = shid_output_report;
+ shid->ops.get_feature_report = shid_get_feature_report;
+ shid->ops.set_feature_report = shid_set_feature_report;
+
+ ssam_device_set_drvdata(sdev, shid);
+ return surface_hid_device_add(shid);
+}
+
+static void surface_hid_remove(struct ssam_device *sdev)
+{
+ surface_hid_device_destroy(ssam_device_get_drvdata(sdev));
+}
+
+static const struct ssam_device_id surface_hid_match[] = {
+ { SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_hid_match);
+
+static struct ssam_device_driver surface_hid_driver = {
+ .probe = surface_hid_probe,
+ .remove = surface_hid_remove,
+ .match_table = surface_hid_match,
+ .driver = {
+ .name = "surface_hid",
+ .pm = &surface_hid_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_hid_driver);
+
+MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("HID transport driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
new file mode 100644
index 000000000000..7b27ec392232
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Common/core components for the Surface System Aggregator Module (SSAM) HID
+ * transport driver. Provides support for integrated HID devices on Microsoft
+ * Surface models.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+#include "surface_hid_core.h"
+
+
+/* -- Device descriptor access. --------------------------------------------- */
+
+static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
+{
+ int status;
+
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
+ (u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
+ if (status)
+ return status;
+
+ if (shid->hid_desc.desc_len != sizeof(shid->hid_desc)) {
+ dev_err(shid->dev, "unexpected HID descriptor length: got %u, expected %zu\n",
+ shid->hid_desc.desc_len, sizeof(shid->hid_desc));
+ return -EPROTO;
+ }
+
+ if (shid->hid_desc.desc_type != HID_DT_HID) {
+ dev_err(shid->dev, "unexpected HID descriptor type: got %#04x, expected %#04x\n",
+ shid->hid_desc.desc_type, HID_DT_HID);
+ return -EPROTO;
+ }
+
+ if (shid->hid_desc.num_descriptors != 1) {
+ dev_err(shid->dev, "unexpected number of descriptors: got %u, expected 1\n",
+ shid->hid_desc.num_descriptors);
+ return -EPROTO;
+ }
+
+ if (shid->hid_desc.report_desc_type != HID_DT_REPORT) {
+ dev_err(shid->dev, "unexpected report descriptor type: got %#04x, expected %#04x\n",
+ shid->hid_desc.report_desc_type, HID_DT_REPORT);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
+{
+ int status;
+
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
+ (u8 *)&shid->attrs, sizeof(shid->attrs));
+ if (status)
+ return status;
+
+ if (get_unaligned_le32(&shid->attrs.length) != sizeof(shid->attrs)) {
+ dev_err(shid->dev, "unexpected attribute length: got %u, expected %zu\n",
+ get_unaligned_le32(&shid->attrs.length), sizeof(shid->attrs));
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+
+/* -- Transport driver (common). -------------------------------------------- */
+
+static int surface_hid_start(struct hid_device *hid)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+
+ return ssam_notifier_register(shid->ctrl, &shid->notif);
+}
+
+static void surface_hid_stop(struct hid_device *hid)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+
+ /* Note: This call will log errors for us, so ignore them here. */
+ ssam_notifier_unregister(shid->ctrl, &shid->notif);
+}
+
+static int surface_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void surface_hid_close(struct hid_device *hid)
+{
+}
+
+static int surface_hid_parse(struct hid_device *hid)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+ size_t len = get_unaligned_le16(&shid->hid_desc.report_desc_len);
+ u8 *buf;
+ int status;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_REPORT, buf, len);
+ if (!status)
+ status = hid_parse_report(hid, buf, len);
+
+ kfree(buf);
+ return status;
+}
+
+static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportnum, u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ struct surface_hid_device *shid = hid->driver_data;
+
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
+ return shid->ops.output_report(shid, reportnum, buf, len);
+
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT)
+ return shid->ops.get_feature_report(shid, reportnum, buf, len);
+
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT)
+ return shid->ops.set_feature_report(shid, reportnum, buf, len);
+
+ return -EIO;
+}
+
+static struct hid_ll_driver surface_hid_ll_driver = {
+ .start = surface_hid_start,
+ .stop = surface_hid_stop,
+ .open = surface_hid_open,
+ .close = surface_hid_close,
+ .parse = surface_hid_parse,
+ .raw_request = surface_hid_raw_request,
+};
+
+
+/* -- Common device setup. -------------------------------------------------- */
+
+int surface_hid_device_add(struct surface_hid_device *shid)
+{
+ int status;
+
+ status = surface_hid_load_hid_descriptor(shid);
+ if (status)
+ return status;
+
+ status = surface_hid_load_device_attributes(shid);
+ if (status)
+ return status;
+
+ shid->hid = hid_allocate_device();
+ if (IS_ERR(shid->hid))
+ return PTR_ERR(shid->hid);
+
+ shid->hid->dev.parent = shid->dev;
+ shid->hid->bus = BUS_HOST;
+ shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
+ shid->hid->product = cpu_to_le16(shid->attrs.product);
+ shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+ shid->hid->country = shid->hid_desc.country_code;
+
+ snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
+ shid->hid->vendor, shid->hid->product);
+
+ strscpy(shid->hid->phys, dev_name(shid->dev), sizeof(shid->hid->phys));
+
+ shid->hid->driver_data = shid;
+ shid->hid->ll_driver = &surface_hid_ll_driver;
+
+ status = hid_add_device(shid->hid);
+ if (status)
+ hid_destroy_device(shid->hid);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(surface_hid_device_add);
+
+void surface_hid_device_destroy(struct surface_hid_device *shid)
+{
+ hid_destroy_device(shid->hid);
+}
+EXPORT_SYMBOL_GPL(surface_hid_device_destroy);
+
+
+/* -- PM ops. --------------------------------------------------------------- */
+
+#ifdef CONFIG_PM_SLEEP
+
+static int surface_hid_suspend(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
+
+ return 0;
+}
+
+static int surface_hid_resume(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->resume)
+ return d->hid->driver->resume(d->hid);
+
+ return 0;
+}
+
+static int surface_hid_freeze(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
+
+ return 0;
+}
+
+static int surface_hid_poweroff(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
+
+ return 0;
+}
+
+static int surface_hid_restore(struct device *dev)
+{
+ struct surface_hid_device *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->reset_resume)
+ return d->hid->driver->reset_resume(d->hid);
+
+ return 0;
+}
+
+const struct dev_pm_ops surface_hid_pm_ops = {
+ .freeze = surface_hid_freeze,
+ .thaw = surface_hid_resume,
+ .suspend = surface_hid_suspend,
+ .resume = surface_hid_resume,
+ .poweroff = surface_hid_poweroff,
+ .restore = surface_hid_restore,
+};
+EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
+
+#else /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops surface_hid_pm_ops = { };
+EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
+
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("HID transport driver core for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/surface-hid/surface_hid_core.h b/drivers/hid/surface-hid/surface_hid_core.h
new file mode 100644
index 000000000000..4b1a7b57e035
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_hid_core.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Common/core components for the Surface System Aggregator Module (SSAM) HID
+ * transport driver. Provides support for integrated HID devices on Microsoft
+ * Surface models.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef SURFACE_HID_CORE_H
+#define SURFACE_HID_CORE_H
+
+#include <linux/hid.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+enum surface_hid_descriptor_entry {
+ SURFACE_HID_DESC_HID = 0,
+ SURFACE_HID_DESC_REPORT = 1,
+ SURFACE_HID_DESC_ATTRS = 2,
+};
+
+struct surface_hid_descriptor {
+ __u8 desc_len; /* = 9 */
+ __u8 desc_type; /* = HID_DT_HID */
+ __le16 hid_version;
+ __u8 country_code;
+ __u8 num_descriptors; /* = 1 */
+
+ __u8 report_desc_type; /* = HID_DT_REPORT */
+ __le16 report_desc_len;
+} __packed;
+
+static_assert(sizeof(struct surface_hid_descriptor) == 9);
+
+struct surface_hid_attributes {
+ __le32 length;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
+ __u8 _unknown[22];
+} __packed;
+
+static_assert(sizeof(struct surface_hid_attributes) == 32);
+
+struct surface_hid_device;
+
+struct surface_hid_device_ops {
+ int (*get_descriptor)(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len);
+ int (*output_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
+ int (*get_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
+ int (*set_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
+};
+
+struct surface_hid_device {
+ struct device *dev;
+ struct ssam_controller *ctrl;
+ struct ssam_device_uid uid;
+
+ struct surface_hid_descriptor hid_desc;
+ struct surface_hid_attributes attrs;
+
+ struct ssam_event_notifier notif;
+ struct hid_device *hid;
+
+ struct surface_hid_device_ops ops;
+};
+
+int surface_hid_device_add(struct surface_hid_device *shid);
+void surface_hid_device_destroy(struct surface_hid_device *shid);
+
+extern const struct dev_pm_ops surface_hid_pm_ops;
+
+#endif /* SURFACE_HID_CORE_H */
diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
new file mode 100644
index 000000000000..0635341bc517
--- /dev/null
+++ b/drivers/hid/surface-hid/surface_kbd.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) HID transport driver for the legacy
+ * keyboard interface (KBD/TC=0x08 subsystem). Provides support for the
+ * integrated HID keyboard on Surface Laptops 1 and 2.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+#include "surface_hid_core.h"
+
+
+/* -- SAM interface (KBD). -------------------------------------------------- */
+
+#define KBD_FEATURE_REPORT_SIZE 7 /* 6 + report ID */
+
+enum surface_kbd_cid {
+ SURFACE_KBD_CID_GET_DESCRIPTOR = 0x00,
+ SURFACE_KBD_CID_SET_CAPSLOCK_LED = 0x01,
+ SURFACE_KBD_CID_EVT_INPUT_GENERIC = 0x03,
+ SURFACE_KBD_CID_EVT_INPUT_HOTKEYS = 0x04,
+ SURFACE_KBD_CID_GET_FEATURE_REPORT = 0x0b,
+};
+
+static int ssam_kbd_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_KBD_CID_GET_DESCRIPTOR;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(entry);
+ rqst.payload = &entry;
+
+ rsp.capacity = len;
+ rsp.length = 0;
+ rsp.pointer = buf;
+
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(entry));
+ if (status)
+ return status;
+
+ if (rsp.length != len) {
+ dev_err(shid->dev, "invalid descriptor length: got %zu, expected, %zu\n",
+ rsp.length, len);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int ssam_kbd_set_caps_led(struct surface_hid_device *shid, bool value)
+{
+ struct ssam_request rqst;
+ u8 value_u8 = value;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_KBD_CID_SET_CAPSLOCK_LED;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = 0;
+ rqst.length = sizeof(value_u8);
+ rqst.payload = &value_u8;
+
+ return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, NULL, sizeof(value_u8));
+}
+
+static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf, size_t len)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ u8 payload = 0;
+ int status;
+
+ rqst.target_category = shid->uid.category;
+ rqst.target_id = shid->uid.target;
+ rqst.command_id = SURFACE_KBD_CID_GET_FEATURE_REPORT;
+ rqst.instance_id = shid->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(payload);
+ rqst.payload = &payload;
+
+ rsp.capacity = len;
+ rsp.length = 0;
+ rsp.pointer = buf;
+
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(payload));
+ if (status)
+ return status;
+
+ if (rsp.length != len) {
+ dev_err(shid->dev, "invalid feature report length: got %zu, expected, %zu\n",
+ rsp.length, len);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static bool ssam_kbd_is_input_event(const struct ssam_event *event)
+{
+ if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_GENERIC)
+ return true;
+
+ if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_HOTKEYS)
+ return true;
+
+ return false;
+}
+
+static u32 ssam_kbd_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
+
+ /*
+ * Check against the device UID manually, as the registry and device
+ * target categories don't line up.
+ */
+
+ if (shid->uid.category != event->target_category)
+ return 0;
+
+ if (shid->uid.target != event->target_id)
+ return 0;
+
+ if (shid->uid.instance != event->instance_id)
+ return 0;
+
+ if (!ssam_kbd_is_input_event(event))
+ return 0;
+
+ hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
+ return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- Transport driver (KBD). ----------------------------------------------- */
+
+static int skbd_get_caps_led_value(struct hid_device *hid, u8 rprt_id, u8 *buf, size_t len)
+{
+ struct hid_field *field;
+ unsigned int offset, size;
+ int i;
+
+ /* Get LED field. */
+ field = hidinput_get_led_field(hid);
+ if (!field)
+ return -ENOENT;
+
+ /* Check if we got the correct report. */
+ if (len != hid_report_len(field->report))
+ return -ENOENT;
+
+ if (rprt_id != field->report->id)
+ return -ENOENT;
+
+ /* Get caps lock LED index. */
+ for (i = 0; i < field->report_count; i++)
+ if ((field->usage[i].hid & 0xffff) == 0x02)
+ break;
+
+ if (i == field->report_count)
+ return -ENOENT;
+
+ /* Extract value. */
+ size = field->report_size;
+ offset = field->report_offset + i * size;
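+ /* buf[0] holds the report ID; the LED bitfield starts at buf[1]. */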
+ return !!hid_field_extract(hid, buf + 1, size, offset);
+}
+
+static int skbd_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ int caps_led;
+ int status;
+
+ caps_led = skbd_get_caps_led_value(shid->hid, rprt_id, buf, len);
+ if (caps_led < 0)
+ return -EIO; /* Only caps LED output reports are supported. */
+
+ status = ssam_kbd_set_caps_led(shid, caps_led);
+ if (status < 0)
+ return status;
+
+ return len;
+}
+
+static int skbd_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ u8 report[KBD_FEATURE_REPORT_SIZE];
+ int status;
+
+ /*
+ * The keyboard only has a single hard-coded read-only feature report
+ * of size KBD_FEATURE_REPORT_SIZE. Try to load it and compare its
+ * report ID against the requested one.
+ */
+
+ if (len < ARRAY_SIZE(report))
+ return -ENOSPC;
+
+ status = ssam_kbd_get_feature_report(shid, report, ARRAY_SIZE(report));
+ if (status < 0)
+ return status;
+
+ if (rprt_id != report[0])
+ return -ENOENT;
+
+ memcpy(buf, report, ARRAY_SIZE(report));
+ return len;
+}
+
+static int skbd_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
+{
+ /* Not supported. See skbd_get_feature_report() for details. */
+ return -EIO;
+}
+
+
+/* -- Driver setup. --------------------------------------------------------- */
+
+static int surface_kbd_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct surface_hid_device *shid;
+
+ /* Add device link to EC. */
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ shid = devm_kzalloc(&pdev->dev, sizeof(*shid), GFP_KERNEL);
+ if (!shid)
+ return -ENOMEM;
+
+ shid->dev = &pdev->dev;
+ shid->ctrl = ctrl;
+
+ shid->uid.domain = SSAM_DOMAIN_SERIALHUB;
+ shid->uid.category = SSAM_SSH_TC_KBD;
+ shid->uid.target = 2;
+ shid->uid.instance = 0;
+ shid->uid.function = 0;
+
+ shid->notif.base.priority = 1;
+ shid->notif.base.fn = ssam_kbd_event_fn;
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ shid->notif.event.id.target_category = shid->uid.category;
+ shid->notif.event.id.instance = shid->uid.instance;
+ shid->notif.event.mask = SSAM_EVENT_MASK_NONE;
+ shid->notif.event.flags = 0;
+
+ shid->ops.get_descriptor = ssam_kbd_get_descriptor;
+ shid->ops.output_report = skbd_output_report;
+ shid->ops.get_feature_report = skbd_get_feature_report;
+ shid->ops.set_feature_report = skbd_set_feature_report;
+
+ platform_set_drvdata(pdev, shid);
+ return surface_hid_device_add(shid);
+}
+
+static int surface_kbd_remove(struct platform_device *pdev)
+{
+ surface_hid_device_destroy(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct acpi_device_id surface_kbd_match[] = {
+ { "MSHW0096" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
+
+static struct platform_driver surface_kbd_driver = {
+ .probe = surface_kbd_probe,
+ .remove = surface_kbd_remove,
+ .driver = {
+ .name = "surface_keyboard",
+ .acpi_match_table = surface_kbd_match,
+ .pm = &surface_hid_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(surface_kbd_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("HID legacy transport driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index fddac7c72f64..ea126c50acc3 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -505,7 +505,7 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
HID_REQ_SET_REPORT);
}
-/**
+/*
* Play the effect with effect id @effect_id for @value times
*/
static int pidff_playback(struct input_dev *dev, int effect_id, int value)
@@ -997,7 +997,7 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
return 0;
}
-/**
+/*
* Find the implemented effect types
*/
static int pidff_find_effects(struct pidff_device *pidff,
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 45e0b1c75cb1..2fb2991dbe4c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -887,11 +887,11 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
break;
if (i == hid->maxcollection)
- return -1;
+ return -EINVAL;
}
if (!(hiddev = kzalloc(sizeof(struct hiddev), GFP_KERNEL)))
- return -1;
+ return -ENOMEM;
init_waitqueue_head(&hiddev->wait);
INIT_LIST_HEAD(&hiddev->list);
@@ -905,7 +905,7 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hid_err(hid, "Not able to get a minor for this device\n");
hid->hiddev = NULL;
kfree(hiddev);
- return -1;
+ return retval;
}
/*
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index d5b7a696a68c..e22434dfc9ef 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -63,7 +63,7 @@ static const unsigned char usb_kbd_keycode[256] = {
* new key is pressed or a key that was pressed is released.
* @led: URB for sending LEDs (e.g. numlock, ...)
* @newleds: data that will be sent with the @led URB representing which LEDs
- should be on
+ * should be on
* @name: Name of the keyboard. @dev's name field points to this buffer
* @phys: Physical path of the keyboard. @dev's phys field points to this
* buffer
@@ -91,7 +91,7 @@ struct usb_kbd {
unsigned char *leds;
dma_addr_t new_dma;
dma_addr_t leds_dma;
-
+
spinlock_t leds_lock;
bool led_urb_submitted;
@@ -175,15 +175,15 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
}
*(kbd->leds) = kbd->newleds;
-
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
pr_err("usb_submit_urb(leds) failed\n");
else
kbd->led_urb_submitted = true;
-
+
spin_unlock_irqrestore(&kbd->leds_lock, flags);
-
+
return 0;
}
@@ -205,14 +205,14 @@ static void usb_kbd_led(struct urb *urb)
}
*(kbd->leds) = kbd->newleds;
-
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC)){
hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
kbd->led_urb_submitted = false;
}
spin_unlock_irqrestore(&kbd->leds_lock, flags);
-
+
}
static int usb_kbd_open(struct input_dev *dev)
@@ -358,9 +358,9 @@ static int usb_kbd_probe(struct usb_interface *iface,
device_set_wakeup_enable(&dev->dev, 1);
return 0;
-fail2:
+fail2:
usb_kbd_free_mem(dev, kbd);
-fail1:
+fail1:
input_free_device(input_dev);
kfree(kbd);
return error;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8328ef155c46..57bfa0ae9836 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1495,7 +1495,7 @@ struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group_id,
return &group->leds[id];
}
-/**
+/*
* wacom_led_next: gives the next available led with a wacom trigger.
*
* returns the next available struct wacom_led which has its default trigger
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2d70dc4bea65..81d7d12bcf34 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1860,8 +1860,6 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
usage->type = type;
usage->code = code;
- set_bit(type, input->evbit);
-
switch (type) {
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
@@ -1869,13 +1867,9 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
- input_set_capability(input, EV_KEY, code);
- break;
case EV_MSC:
- input_set_capability(input, EV_MSC, code);
- break;
case EV_SW:
- input_set_capability(input, EV_SW, code);
+ input_set_capability(input, type, code);
break;
}
}
@@ -2187,6 +2181,18 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
}
}
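+/*
+ * BTN_STYLUS3 is only advertised once the report descriptor has provided the
+ * barrel switch, second barrel switch and serial-high usages, and the pen is
+ * not an AES pen.
+ */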
+static void wacom_set_barrel_switch3_usage(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input = wacom_wac->pen_input;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (!(features->quirks & WACOM_QUIRK_AESPEN) &&
+ wacom_wac->hid_data.barrelswitch &&
+ wacom_wac->hid_data.barrelswitch2 &&
+ wacom_wac->hid_data.serialhi)
+ input_set_capability(input, EV_KEY, BTN_STYLUS3);
+}
+
static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -2227,13 +2233,21 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
break;
case HID_DG_ERASER:
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
+ break;
case HID_DG_TIPSWITCH:
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
case HID_DG_BARRELSWITCH:
+ wacom_wac->hid_data.barrelswitch = true;
+ wacom_set_barrel_switch3_usage(wacom_wac);
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS, 0);
break;
case HID_DG_BARRELSWITCH2:
+ wacom_wac->hid_data.barrelswitch2 = true;
+ wacom_set_barrel_switch3_usage(wacom_wac);
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
@@ -2245,22 +2259,12 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
case WACOM_HID_WD_SERIALHI:
+ wacom_wac->hid_data.serialhi = true;
+ wacom_set_barrel_switch3_usage(wacom_wac);
wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
-
- if (!(features->quirks & WACOM_QUIRK_AESPEN)) {
- set_bit(EV_KEY, input->evbit);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
- input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
- input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
- }
- }
break;
case WACOM_HID_WD_FINGERWHEEL:
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
break;
}
@@ -3582,11 +3586,9 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
else
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- if (features->type == HID_GENERIC) {
- /* setup has already been done; apply otherwise-undetectible quirks */
- input_set_capability(input_dev, EV_KEY, BTN_STYLUS3);
+ if (features->type == HID_GENERIC)
+ /* setup has already been done */
return 0;
- }
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 195910dd2154..71c886245dbf 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -300,6 +300,7 @@ struct hid_data {
bool tipswitch;
bool barrelswitch;
bool barrelswitch2;
+ bool serialhi;
int x;
int y;
int pressure;
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 32cd26352f38..53e13476e831 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -28,17 +28,6 @@ config HWSPINLOCK_QCOM
If unsure, say N.
-config HWSPINLOCK_SIRF
- tristate "SIRF Hardware Spinlock device"
- depends on ARCH_SIRF || COMPILE_TEST
- help
- Say y here to support the SIRF Hardware Spinlock device, which
- provides a synchronisation mechanism for the various processors
- on the SoC.
-
- It's safe to say n here if you're not interested in SIRF hardware
- spinlock or just want a bare minimum kernel.
-
config HWSPINLOCK_SPRD
tristate "SPRD Hardware Spinlock device"
depends on ARCH_SPRD || COMPILE_TEST
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index ed053e3f02be..1f8dd6f5814f 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -6,7 +6,6 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
-obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_STM32) += stm32_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
deleted file mode 100644
index a3f77120bad7..000000000000
--- a/drivers/hwspinlock/sirf_hwspinlock.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SIRF hardware spinlock driver
- *
- * Copyright (c) 2015 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/hwspinlock.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "hwspinlock_internal.h"
-
-struct sirf_hwspinlock {
- void __iomem *io_base;
- struct hwspinlock_device bank;
-};
-
-/* Number of Hardware Spinlocks*/
-#define HW_SPINLOCK_NUMBER 30
-
-/* Hardware spinlock register offsets */
-#define HW_SPINLOCK_BASE 0x404
-#define HW_SPINLOCK_OFFSET(x) (HW_SPINLOCK_BASE + 0x4 * (x))
-
-static int sirf_hwspinlock_trylock(struct hwspinlock *lock)
-{
- void __iomem *lock_addr = lock->priv;
-
- /* attempt to acquire the lock by reading value == 1 from it */
- return !!readl(lock_addr);
-}
-
-static void sirf_hwspinlock_unlock(struct hwspinlock *lock)
-{
- void __iomem *lock_addr = lock->priv;
-
- /* release the lock by writing 0 to it */
- writel(0, lock_addr);
-}
-
-static const struct hwspinlock_ops sirf_hwspinlock_ops = {
- .trylock = sirf_hwspinlock_trylock,
- .unlock = sirf_hwspinlock_unlock,
-};
-
-static int sirf_hwspinlock_probe(struct platform_device *pdev)
-{
- struct sirf_hwspinlock *hwspin;
- struct hwspinlock *hwlock;
- int idx;
-
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- hwspin = devm_kzalloc(&pdev->dev,
- struct_size(hwspin, bank.lock,
- HW_SPINLOCK_NUMBER),
- GFP_KERNEL);
- if (!hwspin)
- return -ENOMEM;
-
- /* retrieve io base */
- hwspin->io_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hwspin->io_base))
- return PTR_ERR(hwspin->io_base);
-
- for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
- hwlock = &hwspin->bank.lock[idx];
- hwlock->priv = hwspin->io_base + HW_SPINLOCK_OFFSET(idx);
- }
-
- platform_set_drvdata(pdev, hwspin);
-
- return devm_hwspin_lock_register(&pdev->dev, &hwspin->bank,
- &sirf_hwspinlock_ops, 0,
- HW_SPINLOCK_NUMBER);
-}
-
-static const struct of_device_id sirf_hwpinlock_ids[] = {
- { .compatible = "sirf,hwspinlock", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
-
-static struct platform_driver sirf_hwspinlock_driver = {
- .probe = sirf_hwspinlock_probe,
- .driver = {
- .name = "atlas7_hwspinlock",
- .of_match_table = sirf_hwpinlock_ids,
- },
-};
-
-module_platform_driver(sirf_hwspinlock_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SIRF Hardware spinlock driver");
-MODULE_AUTHOR("Wei Chen <wei.chen@csr.com>");
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7b44ba22cbe1..84530fd80998 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -97,15 +97,15 @@ config CORESIGHT_SOURCE_ETM3X
module will be called coresight-etm3x.
config CORESIGHT_SOURCE_ETM4X
- tristate "CoreSight Embedded Trace Macrocell 4.x driver"
+ tristate "CoreSight ETMv4.x / ETE driver"
depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
select PID_IN_CONTEXTIDR
help
- This driver provides support for the ETM4.x tracer module, tracing the
- instructions that a processor is executing. This is primarily useful
- for instruction level tracing. Depending on the implemented version
- data tracing may also be available.
+ This driver provides support for the CoreSight Embedded Trace Macrocell
+ version 4.x and the Embedded Trace Extensions (ETE). Both are CPU tracer
+ modules, tracing the instructions that a processor is executing. This is
+ primarily useful for instruction level tracing.
To compile this driver as a module, choose M here: the
module will be called coresight-etm4x.
@@ -173,4 +173,18 @@ config CORESIGHT_CTI_INTEGRATION_REGS
CTI trigger connections between this and other devices. These
registers are not used in normal operation and can leave devices in
an inconsistent state.
+
+config CORESIGHT_TRBE
+ tristate "Trace Buffer Extension (TRBE) driver"
+ depends on ARM64 && CORESIGHT_SOURCE_ETM4X
+ help
+ This driver provides support for percpu Trace Buffer Extension (TRBE).
+ TRBE always needs to be used along with its corresponding percpu ETE
+ component. ETE generates trace data which is then captured with TRBE.
+ Unlike traditional sink devices, TRBE is a CPU feature accessible via
+ system registers. But its explicit dependency on the trace unit (ETE)
+ requires it to be plugged in as a coresight sink device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-trbe.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index f20e357758d1..d60816509755 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index b57bea167102..6c68d34d956e 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -23,6 +23,7 @@
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
/**
* struct coresight_node - elements of a path, from source to sink
@@ -70,6 +71,18 @@ void coresight_remove_cti_ops(void)
}
EXPORT_SYMBOL_GPL(coresight_remove_cti_ops);
+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev)
+{
+ per_cpu(csdev_sink, cpu) = csdev;
+}
+EXPORT_SYMBOL_GPL(coresight_set_percpu_sink);
+
+struct coresight_device *coresight_get_percpu_sink(int cpu)
+{
+ return per_cpu(csdev_sink, cpu);
+}
+EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
@@ -784,6 +797,14 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (csdev == sink)
goto out;
+ if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
+ sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
+ if (_coresight_build_path(sink, sink, path) == 0) {
+ found = true;
+ goto out;
+ }
+ }
+
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_device *child_dev;
@@ -999,8 +1020,12 @@ coresight_find_default_sink(struct coresight_device *csdev)
int depth = 0;
/* look for a default sink if we have not found for this device */
- if (!csdev->def_sink)
- csdev->def_sink = coresight_find_sink(csdev, &depth);
+ if (!csdev->def_sink) {
+ if (coresight_is_percpu_source(csdev))
+ csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev));
+ if (!csdev->def_sink)
+ csdev->def_sink = coresight_find_sink(csdev, &depth);
+ }
return csdev->def_sink;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index c1bec2ad3911..6f398377fec9 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -24,7 +24,26 @@
static struct pmu etm_pmu;
static bool etm_perf_up;
-static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+/*
+ * An ETM context for a running event includes the perf aux handle
+ * and aux_data. For ETM, the aux_data (etm_event_data) consists of
+ * the trace path and the sink configuration. The event data is accessible
+ * via perf_get_aux(handle). However, a sink could "end" a perf output
+ * handle via the IRQ handler. And if the "sink" encounters a failure
+ * to "begin" another session (e.g due to lack of space in the buffer),
+ * the handle will be cleared. Thus, the event_data may not be accessible
+ * from the handle when we get to the etm_event_stop(), which is required
+ * for stopping the trace path. The event_data is guaranteed to stay alive
+ * until "free_aux()", which cannot happen as long as the event is active on
+ * the ETM. Thus the event_data for the session must be part of the ETM context
+ * to make sure we can disable the trace path.
+ */
+struct etm_ctxt {
+ struct perf_output_handle handle;
+ struct etm_event_data *event_data;
+};
+
+static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/*
@@ -232,6 +251,25 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
+/*
+ * Check if two given sinks are compatible with each other,
+ * so that they can use the same sink buffers, when an event
+ * moves around.
+ */
+static bool sinks_compatible(struct coresight_device *a,
+ struct coresight_device *b)
+{
+ if (!a || !b)
+ return false;
+ /*
+ * If the sinks are of the same subtype and driven
+ * by the same driver, we can use the same buffer
+ * on these sinks.
+ */
+ return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
+ (sink_ops(a) == sink_ops(b));
+}
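+
+/*
+ * For instance, two per-CPU system memory sinks (such as TRBE
+ * instances registered with the PERCPU_SYSMEM sink subtype and
+ * driven by the same driver) are compatible, so an event migrating
+ * between their CPUs can keep reusing its sink buffer.
+ */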
+
static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
@@ -239,6 +277,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink = NULL;
+ struct coresight_device *user_sink = NULL, *last_sink = NULL;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(cpu);
@@ -249,7 +288,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
/* First get the selected sink from user space. */
if (event->attr.config2) {
id = (u32)event->attr.config2;
- sink = coresight_get_sink_by_id(id);
+ sink = user_sink = coresight_get_sink_by_id(id);
}
mask = &event_data->mask;
@@ -277,14 +316,33 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/*
- * No sink provided - look for a default sink for one of the
- * devices. At present we only support topology where all CPUs
- * use the same sink [N:1], so only need to find one sink. The
- * coresight_build_path later will remove any CPU that does not
- * attach to the sink, or if we have not found a sink.
+ * No sink provided - look for a default sink for all the ETMs
+ * where this event can be scheduled.
+ * We allocate the sink specific buffers only once for this
+ * event. If the ETMs have different default sink devices, we
+ * can only use a single "type" of sink as the event can carry
+ * only one sink specific buffer. Thus we have to make sure
+ * that the sinks are of the same type and driven by the same
+ * driver, as the one we allocate the buffer for. As such
+ * we choose the first sink and check if the remaining ETMs
+ * have a compatible default sink. We don't trace on a CPU
+ * if the sink is not compatible.
*/
- if (!sink)
+ if (!user_sink) {
+ /* Find the default sink for this ETM */
sink = coresight_find_default_sink(csdev);
+ if (!sink) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
+ /* Check if this sink is compatible with the last sink */
+ if (last_sink && !sinks_compatible(last_sink, sink)) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+ last_sink = sink;
+ }
/*
* Building a path doesn't enable it, it simply builds a
@@ -312,7 +370,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
goto err;
- /* Allocate the sink buffer for this session */
+ /*
+ * Allocate the sink buffer for this session. All the sinks
+ * where this event can be scheduled are ensured to be of the
+ * same type. Thus the same sink configuration is used by the
+ * sinks.
+ */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, event, pages,
nr_pages, overwrite);
@@ -332,13 +395,18 @@ static void etm_event_start(struct perf_event *event, int flags)
{
int cpu = smp_processor_id();
struct etm_event_data *event_data;
- struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
+ struct perf_output_handle *handle = &ctxt->handle;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
if (!csdev)
goto fail;
+ /* Have we messed up our tracking? */
+ if (WARN_ON(ctxt->event_data))
+ goto fail;
+
/*
* Deal with the ring buffer API and get a handle on the
* session's information.
@@ -374,6 +442,8 @@ static void etm_event_start(struct perf_event *event, int flags)
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_disable_path;
+ /* Save the event_data for this ETM */
+ ctxt->event_data = event_data;
out:
return;
@@ -392,13 +462,30 @@ static void etm_event_stop(struct perf_event *event, int mode)
int cpu = smp_processor_id();
unsigned long size;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
- struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
- struct etm_event_data *event_data = perf_get_aux(handle);
+ struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
+ struct perf_output_handle *handle = &ctxt->handle;
+ struct etm_event_data *event_data;
struct list_head *path;
+ /*
+ * If we still have access to the event_data via handle,
+ * confirm that we haven't messed up the tracking.
+ */
+ if (handle->event &&
+ WARN_ON(perf_get_aux(handle) != ctxt->event_data))
+ return;
+
+ event_data = ctxt->event_data;
+ /* Clear the event_data as this ETM is stopping the trace. */
+ ctxt->event_data = NULL;
+
if (event->hw.state == PERF_HES_STOPPED)
return;
+ /* We must have a valid event_data for a running event */
+ if (WARN_ON(!event_data))
+ return;
+
if (!csdev)
return;
@@ -416,7 +503,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
- if (mode & PERF_EF_UPDATE) {
+ /*
+ * If the handle is not bound to an event anymore
+ * (e.g, the sink driver was unable to restart the
+ * handle due to lack of buffer space), we don't
+ * have to do anything here.
+ */
+ if (handle->event && (mode & PERF_EF_UPDATE)) {
if (WARN_ON_ONCE(handle->event != event))
return;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index a5b13a7779c3..db881993c211 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <asm/barrier.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
#include <asm/local.h>
@@ -114,30 +115,91 @@ void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
}
}
-static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, struct csdev_access *csa)
+static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
- /* Writing 0 to TRCOSLAR unlocks the trace registers */
- etm4x_relaxed_write32(csa, 0x0, TRCOSLAR);
- drvdata->os_unlock = true;
+ u64 res = 0;
+
+ switch (offset) {
+ ETE_READ_CASES(res)
+ default:
+ pr_warn_ratelimited("ete: trying to read unsupported register @%x\n",
+ offset);
+ }
+
+ if (!_relaxed)
+ __iormb(res); /* Imitate the !relaxed I/O helpers */
+
+ return res;
+}
+
+static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
+{
+ if (!_relaxed)
+ __iowmb(); /* Imitate the !relaxed I/O helpers */
+ if (!_64bit)
+ val &= GENMASK(31, 0);
+
+ switch (offset) {
+ ETE_WRITE_CASES(val)
+ default:
+ pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n",
+ offset);
+ }
+}
+
+static void etm_detect_os_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR);
+
+ drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr);
+}
+
+static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa, u32 val)
+{
+ val = !!val;
+
+ switch (drvdata->os_lock_model) {
+ case ETM_OSLOCK_PRESENT:
+ etm4x_relaxed_write32(csa, val, TRCOSLAR);
+ break;
+ case ETM_OSLOCK_PE:
+ write_sysreg_s(val, SYS_OSLAR_EL1);
+ break;
+ default:
+ pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n",
+ smp_processor_id(), drvdata->os_lock_model);
+ fallthrough;
+ case ETM_OSLOCK_NI:
+ return;
+ }
isb();
}
+static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ WARN_ON(drvdata->cpu != smp_processor_id());
+
+ /* Writing 0 to OS Lock unlocks the trace unit registers */
+ etm_write_os_lock(drvdata, csa, 0x0);
+ drvdata->os_unlock = true;
+}
+
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
if (!WARN_ON(!drvdata->csdev))
etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
-
}
static void etm4_os_lock(struct etmv4_drvdata *drvdata)
{
if (WARN_ON(!drvdata->csdev))
return;
-
- /* Writing 0x1 to TRCOSLAR locks the trace registers */
- etm4x_relaxed_write32(&drvdata->csdev->access, 0x1, TRCOSLAR);
+ /* Writing 0x1 to OS Lock locks the trace registers */
+ etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1);
drvdata->os_unlock = false;
- isb();
}
static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
@@ -371,6 +433,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
+ /*
+ * ETE mandates that the TRCRSR is written to before
+ * enabling it.
+ */
+ if (etm4x_is_ete(drvdata))
+ etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
+
/* Enable the trace unit */
etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
@@ -654,6 +723,7 @@ static int etm4_enable(struct coresight_device *csdev,
static void etm4_disable_hw(void *info)
{
u32 control;
+ u64 trfcr;
struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
@@ -677,18 +747,32 @@ static void etm4_disable_hw(void *info)
control &= ~0x1;
/*
+ * If the CPU supports v8.4 Trace filter Control,
+ * set the ETM to trace prohibited region.
+ */
+ if (drvdata->trfc) {
+ trfcr = read_sysreg_s(SYS_TRFCR_EL1);
+ write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE),
+ SYS_TRFCR_EL1);
+ isb();
+ }
+ /*
* Make sure everything completes before disabling, as recommended
* by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
* SSTATUS") of ARM IHI 0064D
*/
dsb(sy);
isb();
+ /* Trace synchronization barrier; a nop if not supported */
+ tsb_csync();
etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
/* wait for TRCSTATR.PMSTABLE to go to '1' */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
+ if (drvdata->trfc)
+ write_sysreg_s(trfcr, SYS_TRFCR_EL1);
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -817,13 +901,24 @@ static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
* ETMs implementing sysreg access must implement TRCDEVARCH.
*/
devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
- if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH)
+ switch (devarch & ETM_DEVARCH_ID_MASK) {
+ case ETM_DEVARCH_ETMv4x_ARCH:
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = etm4x_sysreg_read,
+ .write = etm4x_sysreg_write,
+ };
+ break;
+ case ETM_DEVARCH_ETE_ARCH:
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = ete_sysreg_read,
+ .write = ete_sysreg_write,
+ };
+ break;
+ default:
return false;
- *csa = (struct csdev_access) {
- .io_mem = false,
- .read = etm4x_sysreg_read,
- .write = etm4x_sysreg_write,
- };
+ }
drvdata->arch = etm_devarch_to_arch(devarch);
return true;
@@ -873,7 +968,7 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
return false;
}
-static void cpu_enable_tracing(void)
+static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
{
u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
u64 trfcr;
@@ -881,6 +976,7 @@ static void cpu_enable_tracing(void)
if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
return;
+ drvdata->trfc = true;
/*
* If the CPU supports v8.4 SelfHosted Tracing, enable
* tracing at the kernel EL and EL0, forcing to use the
@@ -920,6 +1016,9 @@ static void etm4_init_arch_data(void *info)
if (!etm4_init_csdev_access(drvdata, csa))
return;
+ /* Detect the support for OS Lock before we actually use it */
+ etm_detect_os_lock(drvdata, csa);
+
/* Make sure all registers are accessible */
etm4_os_unlock_csa(drvdata, csa);
etm4_cs_unlock(drvdata, csa);
@@ -1082,7 +1181,7 @@ static void etm4_init_arch_data(void *info)
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
etm4_cs_lock(drvdata, csa);
- cpu_enable_tracing();
+ cpu_enable_tracing(drvdata);
}
static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
@@ -1760,6 +1859,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
struct etmv4_drvdata *drvdata;
struct coresight_desc desc = { 0 };
struct etm4_init_arg init_arg = { 0 };
+ u8 major, minor;
+ char *type_name;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -1786,10 +1887,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
if (drvdata->cpu < 0)
return drvdata->cpu;
- desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
- if (!desc.name)
- return -ENOMEM;
-
init_arg.drvdata = drvdata;
init_arg.csa = &desc.access;
init_arg.pid = etm_pid;
@@ -1806,6 +1903,22 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
drvdata->skip_power_up = true;
+ major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
+ minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
+
+ if (etm4x_is_ete(drvdata)) {
+ type_name = "ete";
+ /* ETE v1 has major version == 0b101. Adjust this for logging. */
+ major -= 4;
+ } else {
+ type_name = "etm";
+ }
+
+ desc.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d", type_name, drvdata->cpu);
+ if (!desc.name)
+ return -ENOMEM;
+
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
@@ -1833,9 +1946,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
etmdrvdata[drvdata->cpu] = drvdata;
- dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
- drvdata->cpu, ETM_ARCH_MAJOR_VERSION(drvdata->arch),
- ETM_ARCH_MINOR_VERSION(drvdata->arch));
+ dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n",
+ drvdata->cpu, type_name, major, minor);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1979,6 +2091,7 @@ static struct amba_driver etm4x_amba_driver = {
static const struct of_device_id etm4_sysreg_match[] = {
{ .compatible = "arm,coresight-etm4x-sysreg" },
+ { .compatible = "arm,embedded-trace-extension" },
{}
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 0995a10790f4..007bad9e7ad8 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2374,12 +2374,20 @@ static inline bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
switch (offset) {
- ETM4x_SYSREG_LIST_CASES
+ ETM_COMMON_SYSREG_LIST_CASES
/*
- * Registers accessible via system instructions are always
- * implemented.
+ * Common registers to ETE & ETM4x accessible via system
+ * instructions are always implemented.
*/
return true;
+
+ ETM4x_ONLY_SYSREG_LIST_CASES
+ /*
+ * We only support etm4x and ete. So if the device is not
+ * ETE, it must be ETMv4x.
+ */
+ return !etm4x_is_ete(drvdata);
+
ETM4x_MMAP_LIST_CASES
/*
* Registers accessible only via memory-mapped registers
@@ -2389,8 +2397,13 @@ etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
* coresight_register() and the csdev is not initialized
* until that is done. So rely on the drvdata->base to
* detect if we have a memory mapped access.
+ * Also ETE doesn't implement memory mapped access, thus
+ * it is sufficient to check that we are using mmio.
*/
return !!drvdata->base;
+
+ ETE_ONLY_SYSREG_LIST_CASES
+ return etm4x_is_ete(drvdata);
}
return false;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 0af60571aa23..e5b79bdb9851 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -29,6 +29,7 @@
#define TRCAUXCTLR 0x018
#define TRCEVENTCTL0R 0x020
#define TRCEVENTCTL1R 0x024
+#define TRCRSR 0x028
#define TRCSTALLCTLR 0x02C
#define TRCTSCTLR 0x030
#define TRCSYNCPR 0x034
@@ -49,6 +50,7 @@
#define TRCSEQRSTEVR 0x118
#define TRCSEQSTR 0x11C
#define TRCEXTINSELR 0x120
+#define TRCEXTINSELRn(n) (0x120 + (n * 4)) /* n = 0-3 */
#define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */
#define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */
#define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */
@@ -126,6 +128,8 @@
#define TRCCIDR2 0xFF8
#define TRCCIDR3 0xFFC
+#define TRCRSR_TA BIT(12)
+
/*
* System instructions to access ETM registers.
* See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
@@ -160,10 +164,22 @@
#define CASE_NOP(__unused, x) \
case (x): /* fall through */
+#define ETE_ONLY_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCRSR) \
+ CASE_##op((val), TRCEXTINSELRn(1)) \
+ CASE_##op((val), TRCEXTINSELRn(2)) \
+ CASE_##op((val), TRCEXTINSELRn(3))
+
/* List of registers accessible via System instructions */
-#define ETM_SYSREG_LIST(op, val) \
- CASE_##op((val), TRCPRGCTLR) \
+#define ETM4x_ONLY_SYSREG_LIST(op, val) \
CASE_##op((val), TRCPROCSELR) \
+ CASE_##op((val), TRCVDCTLR) \
+ CASE_##op((val), TRCVDSACCTLR) \
+ CASE_##op((val), TRCVDARCCTLR) \
+ CASE_##op((val), TRCOSLAR)
+
+#define ETM_COMMON_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPRGCTLR) \
CASE_##op((val), TRCSTATR) \
CASE_##op((val), TRCCONFIGR) \
CASE_##op((val), TRCAUXCTLR) \
@@ -180,9 +196,6 @@
CASE_##op((val), TRCVIIECTLR) \
CASE_##op((val), TRCVISSCTLR) \
CASE_##op((val), TRCVIPCSSCTLR) \
- CASE_##op((val), TRCVDCTLR) \
- CASE_##op((val), TRCVDSACCTLR) \
- CASE_##op((val), TRCVDARCCTLR) \
CASE_##op((val), TRCSEQEVRn(0)) \
CASE_##op((val), TRCSEQEVRn(1)) \
CASE_##op((val), TRCSEQEVRn(2)) \
@@ -277,7 +290,6 @@
CASE_##op((val), TRCSSPCICRn(5)) \
CASE_##op((val), TRCSSPCICRn(6)) \
CASE_##op((val), TRCSSPCICRn(7)) \
- CASE_##op((val), TRCOSLAR) \
CASE_##op((val), TRCOSLSR) \
CASE_##op((val), TRCACVRn(0)) \
CASE_##op((val), TRCACVRn(1)) \
@@ -369,12 +381,38 @@
CASE_##op((val), TRCPIDR2) \
CASE_##op((val), TRCPIDR3)
-#define ETM4x_READ_SYSREG_CASES(res) ETM_SYSREG_LIST(READ, (res))
-#define ETM4x_WRITE_SYSREG_CASES(val) ETM_SYSREG_LIST(WRITE, (val))
+#define ETM4x_READ_SYSREG_CASES(res) \
+ ETM_COMMON_SYSREG_LIST(READ, (res)) \
+ ETM4x_ONLY_SYSREG_LIST(READ, (res))
+
+#define ETM4x_WRITE_SYSREG_CASES(val) \
+ ETM_COMMON_SYSREG_LIST(WRITE, (val)) \
+ ETM4x_ONLY_SYSREG_LIST(WRITE, (val))
+
+#define ETM_COMMON_SYSREG_LIST_CASES \
+ ETM_COMMON_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_ONLY_SYSREG_LIST_CASES \
+ ETM4x_ONLY_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_SYSREG_LIST_CASES \
+ ETM_COMMON_SYSREG_LIST_CASES \
+ ETM4x_ONLY_SYSREG_LIST(NOP, __unused)
-#define ETM4x_SYSREG_LIST_CASES ETM_SYSREG_LIST(NOP, __unused)
#define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused)
+/* ETE only supports system register access */
+#define ETE_READ_CASES(res) \
+ ETM_COMMON_SYSREG_LIST(READ, (res)) \
+ ETE_ONLY_SYSREG_LIST(READ, (res))
+
+#define ETE_WRITE_CASES(val) \
+ ETM_COMMON_SYSREG_LIST(WRITE, (val)) \
+ ETE_ONLY_SYSREG_LIST(WRITE, (val))
+
+#define ETE_ONLY_SYSREG_LIST_CASES \
+ ETE_ONLY_SYSREG_LIST(NOP, __unused)
+
#define read_etm4x_sysreg_offset(offset, _64bit) \
({ \
u64 __val; \
@@ -506,6 +544,20 @@
ETM_MODE_EXCL_USER)
/*
+ * TRCOSLSR.OSLM advertises the OS Lock model.
+ * OSLM[2:0] = TRCOSLSR[4:3,0]
+ *
+ * 0b000 - Trace OS Lock is not implemented.
+ * 0b010 - Trace OS Lock is implemented.
+ * 0b100 - Trace OS Lock is not implemented, unit is controlled by PE OS Lock.
+ */
+#define ETM_OSLOCK_NI 0b000
+#define ETM_OSLOCK_PRESENT 0b010
+#define ETM_OSLOCK_PE 0b100
+
+#define ETM_OSLSR_OSLM(oslsr) ((((oslsr) & GENMASK(4, 3)) >> 2) | ((oslsr) & 0x1))
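+
+/*
+ * For example, OSLSR = 0x10 (bit 4 set) yields OSLM = 0b100, i.e. the
+ * trace unit is controlled by the PE OS Lock (ETM_OSLOCK_PE).
+ */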
+
+/*
* TRCDEVARCH Bit field definitions
* Bits[31:21] - ARCHITECT = Always Arm Ltd.
* * Bits[31:28] = 0x4
@@ -541,11 +593,14 @@
((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13))
#define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4)
+#define ETM_DEVARCH_ARCHID_ETE ETM_DEVARCH_MAKE_ARCHID(0x5)
#define ETM_DEVARCH_ID_MASK \
(ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT)
#define ETM_DEVARCH_ETMv4x_ARCH \
(ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT)
+#define ETM_DEVARCH_ETE_ARCH \
+ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT)
#define TRCSTATR_IDLE_BIT 0
#define TRCSTATR_PMSTABLE_BIT 1
@@ -635,6 +690,8 @@
#define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU)
#define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0)
+#define ETM_ARCH_ETE ETM_ARCH_VERSION(5, 0)
+
/* Interpretation of resource numbers change at ETM v4.3 architecture */
#define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3)
@@ -862,6 +919,7 @@ struct etmv4_save_state {
* @nooverflow: Indicate if overflow prevention is supported.
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state over.
+ * @trfc: If the implementation supports Arm v8.4 trace filter controls.
* @config: structure holding configuration parameters.
* @save_state: State to be preserved across power loss
* @state_needs_restore: True when there is context to restore after PM exit
@@ -897,6 +955,7 @@ struct etmv4_drvdata {
u8 s_ex_level;
u8 ns_ex_level;
u8 q_support;
+ u8 os_lock_model;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
@@ -912,6 +971,7 @@ struct etmv4_drvdata {
bool nooverflow;
bool atbtrig;
bool lpoverride;
+ bool trfc;
struct etmv4_config config;
struct etmv4_save_state *save_state;
bool state_needs_restore;
@@ -940,4 +1000,9 @@ void etm4_config_trace_mode(struct etmv4_config *config);
u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit);
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit);
+
+static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata)
+{
+ return drvdata->arch >= ETM_ARCH_ETE;
+}
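+
+/*
+ * For example, an ETE unit reports architecture version 5.0 via
+ * TRCDEVARCH, so its drvdata->arch (ETM_ARCH_VERSION(5, 0)) is at
+ * least ETM_ARCH_ETE, while any ETMv4.x unit stays below it.
+ */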
#endif
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 3629b7885aca..c594f45319fc 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
struct of_endpoint endpoint;
int in = 0, out = 0;
+ /*
+ * Avoid warnings in of_graph_get_next_endpoint()
+ * if the device doesn't have any graph connections
+ */
+ if (!of_graph_is_present(node))
+ return;
do {
ep = of_graph_get_next_endpoint(node, ep);
if (!ep)
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index f5f654ea2994..ff1dd2092ac5 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -232,4 +232,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
struct coresight_device *ect_csdev);
+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
+struct coresight_device *coresight_get_percpu_sink(int cpu);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
new file mode 100644
index 000000000000..176868496879
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
+ * coresight sink device, which can then pair with an appropriate per-cpu
+ * coresight source device (ETE), thus generating the required trace data.
+ * Trace can be enabled via the perf framework.
+ *
+ * The AUX buffer handling is inspired from Arm SPE PMU driver.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#define DRVNAME "arm_trbe"
+
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <asm/barrier.h>
+#include "coresight-trbe.h"
+
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
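+
+/*
+ * Worked example (assuming 4K pages, i.e. PAGE_SHIFT = 12): with
+ * nr_pages = 4 the buffer spans 16384 bytes, so an absolute perf
+ * index of 70000 maps to offset 70000 % 16384 = 4464.
+ */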
+
+/*
+ * A padding packet that helps the user space tools skip
+ * sections of the captured trace data which could not be
+ * decoded. TRBE doesn't support formatting the trace data,
+ * unlike the legacy CoreSight sinks, and thus we use ETE
+ * trace packets to pad those sections of the buffer.
+ */
+#define ETE_IGNORE_PACKET 0x70
+
+/*
+ * Minimum amount of meaningful trace will contain:
+ * A-Sync, Trace Info, Trace On, Address, Atom.
+ * This is about 44 bytes of ETE trace. To be on
+ * the safe side, we assume 64 bytes is the minimum
+ * space required for a meaningful session, before
+ * we hit a "WRAP" event.
+ */
+#define TRBE_TRACE_MIN_BUF_SIZE 64
+
+enum trbe_fault_action {
+ TRBE_FAULT_ACT_WRAP,
+ TRBE_FAULT_ACT_SPURIOUS,
+ TRBE_FAULT_ACT_FATAL,
+};
+
+struct trbe_buf {
+ /*
+ * Even though trbe_base represents the start address
+ * of the vmap()'ed buffer, it is kept as an unsigned
+ * long for various arithmetic and comparison operations,
+ * and also to be consistent with the trbe_write and
+ * trbe_limit sibling pointers.
+ */
+ unsigned long trbe_base;
+ unsigned long trbe_limit;
+ unsigned long trbe_write;
+ int nr_pages;
+ void **pages;
+ bool snapshot;
+ struct trbe_cpudata *cpudata;
+};
+
+struct trbe_cpudata {
+ bool trbe_flag;
+ u64 trbe_align;
+ int cpu;
+ enum cs_mode mode;
+ struct trbe_buf *buf;
+ struct trbe_drvdata *drvdata;
+};
+
+struct trbe_drvdata {
+ struct trbe_cpudata __percpu *cpudata;
+ struct perf_output_handle * __percpu *handle;
+ struct hlist_node hotplug_node;
+ int irq;
+ cpumask_t supported_cpus;
+ enum cpuhp_state trbe_online;
+ struct platform_device *pdev;
+};
+
+static int trbe_alloc_node(struct perf_event *event)
+{
+ if (event->cpu == -1)
+ return NUMA_NO_NODE;
+ return cpu_to_node(event->cpu);
+}
+
+static void trbe_drain_buffer(void)
+{
+ tsb_csync();
+ dsb(nsh);
+}
+
+static void trbe_drain_and_disable_local(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ trbe_drain_buffer();
+
+ /*
+ * Disable the TRBE without clearing LIMITPTR which
+ * might be required for fetching the buffer limits.
+ */
+ trblimitr &= ~TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ isb();
+}
+
+static void trbe_reset_local(void)
+{
+ trbe_drain_and_disable_local();
+ write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ write_sysreg_s(0, SYS_TRBPTR_EL1);
+ write_sysreg_s(0, SYS_TRBBASER_EL1);
+ write_sysreg_s(0, SYS_TRBSR_EL1);
+}
+
+static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ /*
+ * We cannot proceed with the buffer collection and we
+ * do not have any data for the current session. The
+ * etm_perf driver expects to close out the aux_buffer
+ * at event_stop(). So disable the TRBE here and leave
+ * the update_buffer() to return a 0 size.
+ */
+ trbe_drain_and_disable_local();
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
+}
+
+/*
+ * TRBE Buffer Management
+ *
+ * The TRBE buffer spans from the base pointer till the limit pointer. When enabled,
+ * it starts writing trace data from the write pointer onward till the limit pointer.
+ * When the write pointer reaches the address just before the limit pointer, it gets
+ * wrapped around again to the base pointer. This is called a TRBE wrap event, which
+ * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver
+ * uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ
+ * handler updates the AUX buffer and re-enables the TRBE with updated WRITE and
+ * LIMIT pointers.
+ *
+ * Wrap around with an IRQ
+ * ------ < ------ < ------- < ----- < -----
+ * | |
+ * ------ > ------ > ------- > ----- > -----
+ *
+ * +---------------+-----------------------+
+ * | | |
+ * +---------------+-----------------------+
+ * Base Pointer Write Pointer Limit Pointer
+ *
+ * The base and limit pointers always need to be PAGE_SIZE aligned. But the write
+ * pointer can be aligned to the implementation-defined TRBE trace buffer alignment
+ * as captured in trbe_cpudata->trbe_align.
+ *
+ *
+ * head tail wakeup
+ * +---------------------------------------+----- ~ ~ ------
+ * |$$$$$$$|################|$$$$$$$$$$$$$$| |
+ * +---------------------------------------+----- ~ ~ ------
+ * Base Pointer Write Pointer Limit Pointer
+ *
+ * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
+ * values which track all the driver writes and user reads from the perf auxiliary
+ * buffer. Generally [head..tail] is the area the driver can write into, unless
+ * the wakeup is behind the tail. The enabled TRBE buffer span needs to be adjusted
+ * and configured depending on the perf_output_handle indices, so that the driver
+ * does not overwrite areas in the perf auxiliary buffer which are being, or are yet
+ * to be, consumed by the user space. The enabled TRBE buffer area is a moving subset
+ * of the allocated perf auxiliary buffer.
+ */
+static void trbe_pad_buf(struct perf_output_handle *handle, int len)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len);
+ if (!buf->snapshot)
+ perf_aux_output_skip(handle, len);
+}
+
+static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ /*
+ * The ETE trace has alignment synchronization packets allowing
+ * the decoder to reset in case of an overflow or corruption.
+ * So we can use the entire buffer for the snapshot mode.
+ */
+ return buf->nr_pages * PAGE_SIZE;
+}
+
+/*
+ * TRBE Limit Calculation
+ *
+ * The following markers are used to illustrate various TRBE buffer situations.
+ *
+ * $$$$ - Data area, unconsumed captured trace data, not to be overwritten
+ * #### - Free area, enabled, trace will be written
+ * %%%% - Free area, disabled, trace will not be written
+ * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
+ */
+static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+ const u64 bufsize = buf->nr_pages * PAGE_SIZE;
+ u64 limit = bufsize;
+ u64 head, tail, wakeup;
+
+ head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * head
+ * ------->|
+ * |
+ * head TRBE align tail
+ * +----|-------|---------------|-------+
+ * |$$$$|=======|###############|$$$$$$$|
+ * +----|-------|---------------|-------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * Perf aux buffer output head position can be misaligned depending on
+ * various factors including user space reads. When misaligned, the head
+ * needs to be aligned before TRBE can be configured. Pad the alignment
+ * gap with ETE_IGNORE_PACKET bytes, which the user tools will ignore,
+ * skipping this section and thus advancing the head.
+ */
+ if (!IS_ALIGNED(head, cpudata->trbe_align)) {
+ unsigned long delta = roundup(head, cpudata->trbe_align) - head;
+
+ delta = min(delta, handle->size);
+ trbe_pad_buf(handle, delta);
+ head = PERF_IDX2OFF(handle->head, buf);
+ }
+
+ /*
+ * head = tail (size = 0)
+ * +----|-------------------------------+
+ * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ |
+ * +----|-------------------------------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * Perf aux buffer does not have any space for the driver to write into.
+ * Just communicate the trace truncation event to the user space by marking
+ * it with PERF_AUX_FLAG_TRUNCATED.
+ */
+ if (!handle->size) {
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return 0;
+ }
+
+ /* Compute the tail and wakeup indices now that we've aligned head */
+ tail = PERF_IDX2OFF(handle->head + handle->size, buf);
+ wakeup = PERF_IDX2OFF(handle->wakeup, buf);
+
+ /*
+ * Let's calculate the buffer area which TRBE could write into. There
+ * are three possible scenarios here. The limit needs to be aligned to
+ * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the
+ * unconsumed data.
+ *
+ * 1) head < tail
+ *
+ * head tail
+ * +----|-----------------------|-------+
+ * |$$$$|#######################|$$$$$$$|
+ * +----|-----------------------|-------+
+ * trbe_base limit trbe_base + nr_pages
+ *
+ * TRBE could write into [head..tail] area. Unless the tail is right at
+ * the end of the buffer, neither a wrap around nor an IRQ is expected
+ * while being enabled.
+ *
+ * 2) head == tail
+ *
+ * head = tail (size > 0)
+ * +----|-------------------------------+
+ * |%%%%|###############################|
+ * +----|-------------------------------+
+ * trbe_base limit = trbe_base + nr_pages
+ *
+ * TRBE should just write into [head..base + nr_pages] area even though
+ * the entire buffer is empty. Reason being, when the trace reaches the
+ * end of the buffer, it will just wrap around with an IRQ giving an
+ * opportunity to reconfigure the buffer.
+ *
+ * 3) tail < head
+ *
+ * tail head
+ * +----|-----------------------|-------+
+ * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
+ * +----|-----------------------|-------+
+ * trbe_base limit = trbe_base + nr_pages
+ *
+ * TRBE should just write into [head..base + nr_pages] area even though
+ * the [trbe_base..tail] is also empty. Reason being, when the trace
+ * reaches the end of the buffer, it will just wrap around with an IRQ
+ * giving an opportunity to reconfigure the buffer.
+ */
+ if (head < tail)
+ limit = round_down(tail, PAGE_SIZE);
+
+ /*
+ * Wakeup may be arbitrarily far into the future. If it's not in the
+ * current generation, either we'll wrap before hitting it, or it's
+ * in the past and has been handled already.
+ *
+ * If there's a wakeup before we wrap, arrange to be woken up by the
+ * page boundary following it. Keep the tail boundary if that's lower.
+ *
+ * head wakeup tail
+ * +----|---------------|-------|-------+
+ * |$$$$|###############|%%%%%%%|$$$$$$$|
+ * +----|---------------|-------|-------+
+ * trbe_base limit trbe_base + nr_pages
+ */
+ if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
+ limit = min(limit, round_up(wakeup, PAGE_SIZE));
+
+ /*
+ * There are two situations when this can happen, i.e. the limit is before
+ * the head and hence the TRBE cannot be configured.
+ *
+ * 1) head < tail (aligned down with PAGE_SIZE) and also they are both
+ * within the same PAGE size range.
+ *
+ * PAGE_SIZE
+ * |----------------------|
+ *
+ * limit head tail
+ * +------------|------|--------|-------+
+ * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
+ * +------------|------|--------|-------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both
+ * head and wakeup are within same PAGE size range.
+ *
+ * PAGE_SIZE
+ * |----------------------|
+ *
+ * limit head wakeup tail
+ * +----|------|-------|--------|-------+
+ * |$$$$$$$$$$$|=======|========|$$$$$$$|
+ * +----|------|-------|--------|-------+
+ * trbe_base trbe_base + nr_pages
+ */
+ if (limit > head)
+ return limit;
+
+ trbe_pad_buf(handle, handle->size);
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return 0;
+}
+
+static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = perf_get_aux(handle);
+ u64 limit = __trbe_normal_offset(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * If the head is too close to the limit and we don't
+ * have space for a meaningful run, we'd rather pad it
+ * and start fresh.
+ */
+ if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) {
+ trbe_pad_buf(handle, limit - head);
+ limit = __trbe_normal_offset(handle);
+ }
+ return limit;
+}
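+
+/*
+ * For example (assuming 4K pages): if the aligned head lands at offset
+ * 4072 while the limit is 4096, only 24 bytes remain - less than
+ * TRBE_TRACE_MIN_BUF_SIZE - so those bytes are padded out and the
+ * limit is recomputed from the next page boundary.
+ */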
+
+static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ unsigned long offset;
+
+ if (buf->snapshot)
+ offset = trbe_snapshot_offset(handle);
+ else
+ offset = trbe_normal_offset(handle);
+ return buf->trbe_base + offset;
+}
+
+static void clr_trbe_status(void)
+{
+ u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+ WARN_ON(is_trbe_enabled());
+ trbsr &= ~TRBSR_IRQ;
+ trbsr &= ~TRBSR_TRG;
+ trbsr &= ~TRBSR_WRAP;
+ trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
+ trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
+ trbsr &= ~TRBSR_STOP;
+ write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static void set_trbe_limit_pointer_enabled(unsigned long addr)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+
+ trblimitr &= ~TRBLIMITR_NVM;
+ trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
+ trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
+ trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+ /*
+ * Fill trace buffer mode is used here while configuring the
+ * TRBE for trace capture. In this particular mode, the trace
+ * collection is stopped and a maintenance interrupt is raised
+ * when the current write pointer wraps. This pause in trace
+ * collection gives the software an opportunity to capture the
+ * trace data in the interrupt handler, before reconfiguring
+ * the TRBE.
+ */
+ trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;
+
+ /*
+ * Trigger mode is not used here while configuring the TRBE for
+ * the trace capture. Hence just keep this in the ignore mode.
+ */
+ trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
+ TRBLIMITR_TRIG_MODE_SHIFT;
+ trblimitr |= (addr & PAGE_MASK);
+
+ trblimitr |= TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+
+ /* Synchronize the TRBE enable event */
+ isb();
+}
+
+static void trbe_enable_hw(struct trbe_buf *buf)
+{
+ WARN_ON(buf->trbe_write < buf->trbe_base);
+ WARN_ON(buf->trbe_write >= buf->trbe_limit);
+ set_trbe_disabled();
+ isb();
+ clr_trbe_status();
+ set_trbe_base_pointer(buf->trbe_base);
+ set_trbe_write_pointer(buf->trbe_write);
+
+ /*
+ * Synchronize all the register updates
+ * till now before enabling the TRBE.
+ */
+ isb();
+ set_trbe_limit_pointer_enabled(buf->trbe_limit);
+}
+
+static enum trbe_fault_action trbe_get_fault_act(u64 trbsr)
+{
+ int ec = get_trbe_ec(trbsr);
+ int bsc = get_trbe_bsc(trbsr);
+
+ WARN_ON(is_trbe_running(trbsr));
+ if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
+ return TRBE_FAULT_ACT_FATAL;
+
+ if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
+ return TRBE_FAULT_ACT_FATAL;
+
+ if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) {
+ if (get_trbe_write_pointer() == get_trbe_base_pointer())
+ return TRBE_FAULT_ACT_WRAP;
+ }
+ return TRBE_FAULT_ACT_SPURIOUS;
+}
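+
+/*
+ * Summarising the decoding above: an explicit trigger or an abort,
+ * including a stage 1/2 data abort, is fatal; a wrap event with the
+ * "buffer filled" status and the write pointer back at base is a
+ * normal WRAP; anything else is treated as spurious.
+ */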
+
+static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
+{
+ struct trbe_buf *buf;
+ struct page **pglist;
+ int i;
+
+ /*
+ * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
+ * just a single page, there would not be any room left while writing
+ * into a partially filled TRBE buffer after the page size alignment.
+ * Hence restrict the minimum buffer size to two pages.
+ */
+ if (nr_pages < 2)
+ return NULL;
+
+ buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pglist[i] = virt_to_page(pages[i]);
+
+ buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->trbe_base) {
+ kfree(pglist);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+ buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
+ buf->trbe_write = buf->trbe_base;
+ buf->snapshot = snapshot;
+ buf->nr_pages = nr_pages;
+ buf->pages = pages;
+ kfree(pglist);
+ return buf;
+}
+
+static void arm_trbe_free_buffer(void *config)
+{
+ struct trbe_buf *buf = config;
+
+ vunmap((void *)buf->trbe_base);
+ kfree(buf);
+}
+
+static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *config)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct trbe_buf *buf = config;
+ enum trbe_fault_action act;
+ unsigned long size, offset;
+ unsigned long write, base, status;
+ unsigned long flags;
+
+ WARN_ON(buf->cpudata != cpudata);
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (cpudata->mode != CS_MODE_PERF)
+ return 0;
+
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
+
+ /*
+ * We are about to disable the TRBE, and this could in turn
+ * fill up the buffer, triggering an IRQ. The IRQ could be consumed
+ * by the PE asynchronously, causing a race here against
+ * the IRQ handler in closing out the handle. So, let us
+ * make sure the IRQ can't trigger while we are collecting
+ * the buffer. We also make sure that a WRAP event is handled
+ * accordingly.
+ */
+ local_irq_save(flags);
+
+ /*
+ * If the TRBE was disabled due to lack of space in the AUX buffer or a
+ * spurious fault, the driver leaves it disabled, truncating the buffer.
+ * Since the etm_perf driver expects to close out the AUX buffer, the
+ * driver skips it. Thus, just pass in 0 size here to indicate that the
+ * buffer was truncated.
+ */
+ if (!is_trbe_enabled()) {
+ size = 0;
+ goto done;
+ }
+ /*
+ * The perf handle structure needs to be shared with the TRBE IRQ handler
+ * for capturing trace data and restarting the handle. There is a risk of
+ * a dangling-reference crash when the etm event is being stopped while a
+ * TRBE IRQ is also being processed. This happens due to the release of the
+ * perf handle via perf_aux_output_end() in etm_event_stop(). Stopping the
+ * TRBE here ensures that no IRQ can be generated when the perf handle gets
+ * freed in etm_event_stop().
+ */
+ trbe_drain_and_disable_local();
+ write = get_trbe_write_pointer();
+ base = get_trbe_base_pointer();
+
+ /* Check if there is a pending interrupt and handle it here */
+ status = read_sysreg_s(SYS_TRBSR_EL1);
+ if (is_trbe_irq(status)) {
+
+ /*
+ * Now that we are handling the IRQ here, clear the IRQ
+ * from the status, to let the irq handler know that it
+ * is taken care of.
+ */
+ clr_trbe_irq();
+ isb();
+
+ act = trbe_get_fault_act(status);
+ /*
+ * If this was not due to a WRAP event, we have some
+ * errors and, as such, the buffer is empty.
+ */
+ if (act != TRBE_FAULT_ACT_WRAP) {
+ size = 0;
+ goto done;
+ }
+
+ /*
+ * Otherwise, the buffer is full and the write pointer
+ * has reached base. Adjust this back to the Limit pointer
+ * for correct size. Also, mark the buffer truncated.
+ */
+ write = get_trbe_limit_pointer();
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ }
+
+ offset = write - base;
+ if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf)))
+ size = 0;
+ else
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+
+done:
+ local_irq_restore(flags);
+
+ if (buf->snapshot)
+ handle->head += size;
+ return size;
+}
+
+static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct perf_output_handle *handle = data;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (mode != CS_MODE_PERF)
+ return -EINVAL;
+
+ *this_cpu_ptr(drvdata->handle) = handle;
+ cpudata->buf = buf;
+ cpudata->mode = mode;
+ buf->cpudata = cpudata;
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_stop_and_truncate_event(handle);
+ return 0;
+ }
+ trbe_enable_hw(buf);
+ return 0;
+}
+
+static int arm_trbe_disable(struct coresight_device *csdev)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct trbe_buf *buf = cpudata->buf;
+
+ WARN_ON(buf->cpudata != cpudata);
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (cpudata->mode != CS_MODE_PERF)
+ return -EINVAL;
+
+ trbe_drain_and_disable_local();
+ buf->cpudata = NULL;
+ cpudata->buf = NULL;
+ cpudata->mode = CS_MODE_DISABLED;
+ return 0;
+}
+
+static void trbe_handle_spurious(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_drain_and_disable_local();
+ return;
+ }
+ trbe_enable_hw(buf);
+}
+
+static void trbe_handle_overflow(struct perf_output_handle *handle)
+{
+ struct perf_event *event = handle->event;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ unsigned long offset, size;
+ struct etm_event_data *event_data;
+
+ offset = get_trbe_limit_pointer() - get_trbe_base_pointer();
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+ if (buf->snapshot)
+ handle->head += size;
+
+ /*
+ * Mark the buffer as truncated, as we have stopped the trace
+ * collection upon the WRAP event, without stopping the source.
+ */
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW |
+ PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, size);
+ event_data = perf_aux_output_begin(handle, event);
+ if (!event_data) {
+ /*
+ * We are unable to restart the trace collection,
+ * thus leave the TRBE disabled. The etm-perf driver
+ * is able to detect this with a disconnected handle
+ * (handle->event = NULL).
+ */
+ trbe_drain_and_disable_local();
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
+ return;
+ }
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_stop_and_truncate_event(handle);
+ return;
+ }
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
+ trbe_enable_hw(buf);
+}
+
+static bool is_perf_trbe(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+ struct trbe_drvdata *drvdata = cpudata->drvdata;
+ int cpu = smp_processor_id();
+
+ WARN_ON(buf->trbe_base != get_trbe_base_pointer());
+ WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
+
+ if (cpudata->mode != CS_MODE_PERF)
+ return false;
+
+ if (cpudata->cpu != cpu)
+ return false;
+
+ if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ return false;
+
+ return true;
+}
+
+static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
+{
+ struct perf_output_handle **handle_ptr = dev;
+ struct perf_output_handle *handle = *handle_ptr;
+ enum trbe_fault_action act;
+ u64 status;
+
+ /*
+ * Ensure the trace is visible to the CPUs and
+ * any external aborts have been resolved.
+ */
+ trbe_drain_and_disable_local();
+
+ status = read_sysreg_s(SYS_TRBSR_EL1);
+ /*
+ * If the pending IRQ was handled by the update_buffer callback,
+ * we have nothing to do here.
+ */
+ if (!is_trbe_irq(status))
+ return IRQ_NONE;
+
+ clr_trbe_irq();
+ isb();
+
+ if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
+ return IRQ_NONE;
+
+ if (!is_perf_trbe(handle))
+ return IRQ_NONE;
+
+ /*
+ * Ensure perf callbacks have completed, which may disable
+ * the trace buffer in response to a TRUNCATION flag.
+ */
+ irq_work_run();
+
+ act = trbe_get_fault_act(status);
+ switch (act) {
+ case TRBE_FAULT_ACT_WRAP:
+ trbe_handle_overflow(handle);
+ break;
+ case TRBE_FAULT_ACT_SPURIOUS:
+ trbe_handle_spurious(handle);
+ break;
+ case TRBE_FAULT_ACT_FATAL:
+ trbe_stop_and_truncate_event(handle);
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct coresight_ops_sink arm_trbe_sink_ops = {
+ .enable = arm_trbe_enable,
+ .disable = arm_trbe_disable,
+ .alloc_buffer = arm_trbe_alloc_buffer,
+ .free_buffer = arm_trbe_free_buffer,
+ .update_buffer = arm_trbe_update_buffer,
+};
+
+static const struct coresight_ops arm_trbe_cs_ops = {
+ .sink_ops = &arm_trbe_sink_ops,
+};
+
+static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%llx\n", cpudata->trbe_align);
+}
+static DEVICE_ATTR_RO(align);
+
+static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cpudata->trbe_flag);
+}
+static DEVICE_ATTR_RO(flag);
+
+static struct attribute *arm_trbe_attrs[] = {
+ &dev_attr_align.attr,
+ &dev_attr_flag.attr,
+ NULL,
+};
+
+static const struct attribute_group arm_trbe_group = {
+ .attrs = arm_trbe_attrs,
+};
+
+static const struct attribute_group *arm_trbe_groups[] = {
+ &arm_trbe_group,
+ NULL,
+};
+
+static void arm_trbe_enable_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+
+ trbe_reset_local();
+ enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
+}
+
+static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+{
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+ struct coresight_desc desc = { 0 };
+ struct device *dev;
+
+ if (WARN_ON(trbe_csdev))
+ return;
+
+ dev = &cpudata->drvdata->pdev->dev;
+ desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
+ if (!desc.name)
+ goto cpu_clear;
+
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
+ desc.ops = &arm_trbe_cs_ops;
+ desc.pdata = dev_get_platdata(dev);
+ desc.groups = arm_trbe_groups;
+ desc.dev = dev;
+ trbe_csdev = coresight_register(&desc);
+ if (IS_ERR(trbe_csdev))
+ goto cpu_clear;
+
+ dev_set_drvdata(&trbe_csdev->dev, cpudata);
+ coresight_set_percpu_sink(cpu, trbe_csdev);
+ return;
+cpu_clear:
+ cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_probe_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+ int cpu = smp_processor_id();
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ u64 trbidr;
+
+ if (WARN_ON(!cpudata))
+ goto cpu_clear;
+
+ if (!is_trbe_available()) {
+ pr_err("TRBE is not implemented on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+
+ trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
+ if (!is_trbe_programmable(trbidr)) {
+ pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+
+ cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr);
+ if (cpudata->trbe_align > SZ_2K) {
+ pr_err("Unsupported alignment on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+ cpudata->trbe_flag = get_trbe_flag_update(trbidr);
+ cpudata->cpu = cpu;
+ cpudata->drvdata = drvdata;
+ return;
+cpu_clear:
+ cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_remove_coresight_cpu(void *info)
+{
+ int cpu = smp_processor_id();
+ struct trbe_drvdata *drvdata = info;
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local();
+ if (trbe_csdev) {
+ coresight_unregister(trbe_csdev);
+ cpudata->drvdata = NULL;
+ coresight_set_percpu_sink(cpu, NULL);
+ }
+}
+
+static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
+{
+ int cpu;
+
+ drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
+ if (!drvdata->cpudata)
+ return -ENOMEM;
+
+ for_each_cpu(cpu, &drvdata->supported_cpus) {
+ smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_register_coresight_cpu(drvdata, cpu);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
+ }
+ return 0;
+}
+
+static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
+{
+ int cpu;
+
+ for_each_cpu(cpu, &drvdata->supported_cpus)
+ smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
+ free_percpu(drvdata->cpudata);
+ return 0;
+}
+
+static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+ struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+
+ /*
+ * If this CPU was not probed for TRBE,
+ * initialize it now.
+ */
+ if (!coresight_get_percpu_sink(cpu)) {
+ arm_trbe_probe_cpu(drvdata);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_register_coresight_cpu(drvdata, cpu);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_enable_cpu(drvdata);
+ } else {
+ arm_trbe_enable_cpu(drvdata);
+ }
+ }
+ return 0;
+}
+
+static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local();
+ }
+ return 0;
+}
+
+static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
+{
+ enum cpuhp_state trbe_online;
+ int ret;
+
+ trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+ arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
+ if (trbe_online < 0)
+ return trbe_online;
+
+ ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
+ if (ret) {
+ cpuhp_remove_multi_state(trbe_online);
+ return ret;
+ }
+ drvdata->trbe_online = trbe_online;
+ return 0;
+}
+
+static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
+{
+ cpuhp_remove_multi_state(drvdata->trbe_online);
+}
+
+static int arm_trbe_probe_irq(struct platform_device *pdev,
+ struct trbe_drvdata *drvdata)
+{
+ int ret;
+
+ drvdata->irq = platform_get_irq(pdev, 0);
+ if (drvdata->irq < 0) {
+ pr_err("IRQ not found for the platform device\n");
+ return drvdata->irq;
+ }
+
+ if (!irq_is_percpu(drvdata->irq)) {
+ pr_err("IRQ is not a PPI\n");
+ return -EINVAL;
+ }
+
+ if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
+ return -EINVAL;
+
+ drvdata->handle = alloc_percpu(struct perf_output_handle *);
+ if (!drvdata->handle)
+ return -ENOMEM;
+
+ ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
+ if (ret) {
+ free_percpu(drvdata->handle);
+ return ret;
+ }
+ return 0;
+}
+
+static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
+{
+ free_percpu_irq(drvdata->irq, drvdata->handle);
+ free_percpu(drvdata->handle);
+}
+
+static int arm_trbe_device_probe(struct platform_device *pdev)
+{
+ struct coresight_platform_data *pdata;
+ struct trbe_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ dev_set_drvdata(dev, drvdata);
+ dev->platform_data = pdata;
+ drvdata->pdev = pdev;
+ ret = arm_trbe_probe_irq(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ ret = arm_trbe_probe_coresight(drvdata);
+ if (ret)
+ goto probe_failed;
+
+ ret = arm_trbe_probe_cpuhp(drvdata);
+ if (ret)
+ goto cpuhp_failed;
+
+ return 0;
+cpuhp_failed:
+ arm_trbe_remove_coresight(drvdata);
+probe_failed:
+ arm_trbe_remove_irq(drvdata);
+ return ret;
+}
+
+static int arm_trbe_device_remove(struct platform_device *pdev)
+{
+ struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ arm_trbe_remove_cpuhp(drvdata);
+ arm_trbe_remove_coresight(drvdata);
+ arm_trbe_remove_irq(drvdata);
+ return 0;
+}
+
+static const struct of_device_id arm_trbe_of_match[] = {
+ { .compatible = "arm,trace-buffer-extension"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
+
+static struct platform_driver arm_trbe_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(arm_trbe_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = arm_trbe_device_probe,
+ .remove = arm_trbe_device_remove,
+};
+
+static int __init arm_trbe_init(void)
+{
+ int ret;
+
+ if (arm64_kernel_unmapped_at_el0()) {
+ pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = platform_driver_register(&arm_trbe_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("Error registering %s platform driver\n", DRVNAME);
+ return ret;
+}
+
+static void __exit arm_trbe_exit(void)
+{
+ platform_driver_unregister(&arm_trbe_driver);
+}
+module_init(arm_trbe_init);
+module_exit(arm_trbe_exit);
+
+MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
+MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
new file mode 100644
index 000000000000..abf3e36082f0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This contains all the required hardware-related helper functions
+ * for the Trace Buffer Extension (TRBE) driver in the coresight
+ * framework.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "coresight-etm-perf.h"
+
+static inline bool is_trbe_available(void)
+{
+ u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+ unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+
+ return trbe >= 0b0001;
+}
+
+static inline bool is_trbe_enabled(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ return trblimitr & TRBLIMITR_ENABLE;
+}
+
+#define TRBE_EC_OTHERS 0
+#define TRBE_EC_STAGE1_ABORT 36
+#define TRBE_EC_STAGE2_ABORT 37
+
+static inline int get_trbe_ec(u64 trbsr)
+{
+ return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
+}
+
+#define TRBE_BSC_NOT_STOPPED 0
+#define TRBE_BSC_FILLED 1
+#define TRBE_BSC_TRIGGERED 2
+
+static inline int get_trbe_bsc(u64 trbsr)
+{
+ return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
+}
+
+static inline void clr_trbe_irq(void)
+{
+ u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+ trbsr &= ~TRBSR_IRQ;
+ write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static inline bool is_trbe_irq(u64 trbsr)
+{
+ return trbsr & TRBSR_IRQ;
+}
+
+static inline bool is_trbe_trg(u64 trbsr)
+{
+ return trbsr & TRBSR_TRG;
+}
+
+static inline bool is_trbe_wrap(u64 trbsr)
+{
+ return trbsr & TRBSR_WRAP;
+}
+
+static inline bool is_trbe_abort(u64 trbsr)
+{
+ return trbsr & TRBSR_ABORT;
+}
+
+static inline bool is_trbe_running(u64 trbsr)
+{
+ return !(trbsr & TRBSR_STOP);
+}
+
+#define TRBE_TRIG_MODE_STOP 0
+#define TRBE_TRIG_MODE_IRQ 1
+#define TRBE_TRIG_MODE_IGNORE 3
+
+#define TRBE_FILL_MODE_FILL 0
+#define TRBE_FILL_MODE_WRAP 1
+#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3
+
+static inline void set_trbe_disabled(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ trblimitr &= ~TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+}
+
+static inline bool get_trbe_flag_update(u64 trbidr)
+{
+ return trbidr & TRBIDR_FLAG;
+}
+
+static inline bool is_trbe_programmable(u64 trbidr)
+{
+ return !(trbidr & TRBIDR_PROG);
+}
+
+static inline int get_trbe_address_align(u64 trbidr)
+{
+ return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
+}
+
+static inline unsigned long get_trbe_write_pointer(void)
+{
+ return read_sysreg_s(SYS_TRBPTR_EL1);
+}
+
+static inline void set_trbe_write_pointer(unsigned long addr)
+{
+ WARN_ON(is_trbe_enabled());
+ write_sysreg_s(addr, SYS_TRBPTR_EL1);
+}
+
+static inline unsigned long get_trbe_limit_pointer(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+ unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ return addr;
+}
+
+static inline unsigned long get_trbe_base_pointer(void)
+{
+ u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
+ unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);
+
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ return addr;
+}
+
+static inline void set_trbe_base_pointer(unsigned long addr)
+{
+ WARN_ON(is_trbe_enabled());
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ write_sysreg_s(addr, SYS_TRBBASER_EL1);
+}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3eec59f1fed3..281a65d9b44b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -645,6 +645,16 @@ config I2C_HIGHLANDER
This driver can also be built as a module. If so, the module
will be called i2c-highlander.
+config I2C_HISI
+ tristate "HiSilicon I2C controller"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Say Y here if you want to have HiSilicon I2C controller support
+ available on Kunpeng servers.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-hisi.
+
config I2C_IBM_IIC
tristate "IBM PPC 4xx on-chip I2C interface"
depends on 4xx
@@ -776,7 +786,7 @@ config I2C_MT7621
config I2C_MV64XXX
tristate "Marvell mv64xxx I2C Controller"
- depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST
+ depends on PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -1199,6 +1209,16 @@ config I2C_DLN2
This driver can also be built as a module. If so, the module
will be called i2c-dln2.
+config I2C_CP2615
+ tristate "Silicon Labs CP2615 USB sound card and I2C adapter"
+ depends on USB
+ help
+ If you say yes to this option, support will be included for Silicon
+ Labs CP2615's I2C interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-cp2615.
+
config I2C_PARPORT
tristate "Parallel port adapter"
depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 615f35e3e31f..69e9963615f6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
+obj-$(CONFIG_I2C_HISI) += i2c-hisi.o
obj-$(CONFIG_I2C_HIX5HD2) += i2c-hix5hd2.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o
@@ -123,6 +124,7 @@ obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
+obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 34862ad3423e..1ed7e945bb6d 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -186,9 +186,9 @@ static int amd_ec_write(struct amd_smbus *smbus, unsigned char address,
#define AMD_SMB_PRTCL_PEC 0x80
-static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
+static s32 amd8111_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command, int size,
- union i2c_smbus_data * data)
+ union i2c_smbus_data *data)
{
struct amd_smbus *smbus = adap->algo_data;
unsigned char protocol, len, pec, temp[2];
@@ -199,130 +199,130 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
pec = (flags & I2C_CLIENT_PEC) ? AMD_SMB_PRTCL_PEC : 0;
switch (size) {
- case I2C_SMBUS_QUICK:
- protocol |= AMD_SMB_PRTCL_QUICK;
- read_write = I2C_SMBUS_WRITE;
- break;
-
- case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE) {
- status = amd_ec_write(smbus, AMD_SMB_CMD,
- command);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_BYTE;
- break;
-
- case I2C_SMBUS_BYTE_DATA:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ case I2C_SMBUS_QUICK:
+ protocol |= AMD_SMB_PRTCL_QUICK;
+ read_write = I2C_SMBUS_WRITE;
+ break;
+
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE) {
+ status = amd_ec_write(smbus, AMD_SMB_CMD,
+ command);
if (status)
return status;
- if (read_write == I2C_SMBUS_WRITE) {
- status = amd_ec_write(smbus, AMD_SMB_DATA,
- data->byte);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_BYTE_DATA;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_BYTE;
+ break;
- case I2C_SMBUS_WORD_DATA:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ case I2C_SMBUS_BYTE_DATA:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE) {
+ status = amd_ec_write(smbus, AMD_SMB_DATA,
+ data->byte);
if (status)
return status;
- if (read_write == I2C_SMBUS_WRITE) {
- status = amd_ec_write(smbus, AMD_SMB_DATA,
- data->word & 0xff);
- if (status)
- return status;
- status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
- data->word >> 8);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_WORD_DATA | pec;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_BYTE_DATA;
+ break;
- case I2C_SMBUS_BLOCK_DATA:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
- if (read_write == I2C_SMBUS_WRITE) {
- len = min_t(u8, data->block[0],
- I2C_SMBUS_BLOCK_MAX);
- status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
- if (status)
- return status;
- for (i = 0; i < len; i++) {
- status =
- amd_ec_write(smbus, AMD_SMB_DATA + i,
- data->block[i + 1]);
- if (status)
- return status;
- }
- }
- protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec;
- break;
-
- case I2C_SMBUS_I2C_BLOCK_DATA:
- len = min_t(u8, data->block[0],
- I2C_SMBUS_BLOCK_MAX);
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
- status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
- if (status)
- return status;
- if (read_write == I2C_SMBUS_WRITE)
- for (i = 0; i < len; i++) {
- status =
- amd_ec_write(smbus, AMD_SMB_DATA + i,
- data->block[i + 1]);
- if (status)
- return status;
- }
- protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA;
- break;
-
- case I2C_SMBUS_PROC_CALL:
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
+ case I2C_SMBUS_WORD_DATA:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE) {
status = amd_ec_write(smbus, AMD_SMB_DATA,
- data->word & 0xff);
+ data->word & 0xff);
if (status)
return status;
status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
- data->word >> 8);
+ data->word >> 8);
if (status)
return status;
- protocol = AMD_SMB_PRTCL_PROC_CALL | pec;
- read_write = I2C_SMBUS_READ;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_WORD_DATA | pec;
+ break;
- case I2C_SMBUS_BLOCK_PROC_CALL:
+ case I2C_SMBUS_BLOCK_DATA:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE) {
len = min_t(u8, data->block[0],
- I2C_SMBUS_BLOCK_MAX - 1);
- status = amd_ec_write(smbus, AMD_SMB_CMD, command);
- if (status)
- return status;
+ I2C_SMBUS_BLOCK_MAX);
status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
if (status)
return status;
for (i = 0; i < len; i++) {
- status = amd_ec_write(smbus, AMD_SMB_DATA + i,
- data->block[i + 1]);
+ status =
+ amd_ec_write(smbus, AMD_SMB_DATA + i,
+ data->block[i + 1]);
if (status)
return status;
}
- protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec;
- read_write = I2C_SMBUS_READ;
- break;
+ }
+ protocol |= AMD_SMB_PRTCL_BLOCK_DATA | pec;
+ break;
+
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ len = min_t(u8, data->block[0],
+ I2C_SMBUS_BLOCK_MAX);
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
+ if (status)
+ return status;
+ if (read_write == I2C_SMBUS_WRITE)
+ for (i = 0; i < len; i++) {
+ status =
+ amd_ec_write(smbus, AMD_SMB_DATA + i,
+ data->block[i + 1]);
+ if (status)
+ return status;
+ }
+ protocol |= AMD_SMB_PRTCL_I2C_BLOCK_DATA;
+ break;
- default:
- dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
- return -EOPNOTSUPP;
+ case I2C_SMBUS_PROC_CALL:
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_DATA,
+ data->word & 0xff);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_DATA + 1,
+ data->word >> 8);
+ if (status)
+ return status;
+ protocol = AMD_SMB_PRTCL_PROC_CALL | pec;
+ read_write = I2C_SMBUS_READ;
+ break;
+
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ len = min_t(u8, data->block[0],
+ I2C_SMBUS_BLOCK_MAX - 1);
+ status = amd_ec_write(smbus, AMD_SMB_CMD, command);
+ if (status)
+ return status;
+ status = amd_ec_write(smbus, AMD_SMB_BCNT, len);
+ if (status)
+ return status;
+ for (i = 0; i < len; i++) {
+ status = amd_ec_write(smbus, AMD_SMB_DATA + i,
+ data->block[i + 1]);
+ if (status)
+ return status;
+ }
+ protocol = AMD_SMB_PRTCL_BLOCK_PROC_CALL | pec;
+ read_write = I2C_SMBUS_READ;
+ break;
+
+ default:
+ dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
}
status = amd_ec_write(smbus, AMD_SMB_ADDR, addr << 1);
@@ -357,40 +357,40 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
return 0;
switch (size) {
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- status = amd_ec_read(smbus, AMD_SMB_DATA, &data->byte);
- if (status)
- return status;
- break;
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ status = amd_ec_read(smbus, AMD_SMB_DATA, &data->byte);
+ if (status)
+ return status;
+ break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- status = amd_ec_read(smbus, AMD_SMB_DATA, temp + 0);
- if (status)
- return status;
- status = amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1);
- if (status)
- return status;
- data->word = (temp[1] << 8) | temp[0];
- break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ status = amd_ec_read(smbus, AMD_SMB_DATA, temp + 0);
+ if (status)
+ return status;
+ status = amd_ec_read(smbus, AMD_SMB_DATA + 1, temp + 1);
+ if (status)
+ return status;
+ data->word = (temp[1] << 8) | temp[0];
+ break;
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_BLOCK_PROC_CALL:
- status = amd_ec_read(smbus, AMD_SMB_BCNT, &len);
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ status = amd_ec_read(smbus, AMD_SMB_BCNT, &len);
+ if (status)
+ return status;
+ len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
+ fallthrough;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ for (i = 0; i < len; i++) {
+ status = amd_ec_read(smbus, AMD_SMB_DATA + i,
+ data->block + i + 1);
if (status)
return status;
- len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
- fallthrough;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < len; i++) {
- status = amd_ec_read(smbus, AMD_SMB_DATA + i,
- data->block + i + 1);
- if (status)
- return status;
- }
- data->block[0] = len;
- break;
+ }
+ data->block[0] = len;
+ break;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index ba766d24219e..490ee3962645 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#define N_DATA_REGS 8
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index e4b7f2a951ad..c1bbc4caeb5c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
bool change_role = false;
#endif
- ret = pm_runtime_get_sync(id->dev);
+ ret = pm_runtime_resume_and_get(id->dev);
if (ret < 0)
return ret;
@@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
- ret = pm_runtime_get_sync(id->dev);
+ ret = pm_runtime_resume_and_get(id->dev);
if (ret < 0)
return ret;
@@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (IS_ERR(id->membase))
return PTR_ERR(id->membase);
- id->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ id->irq = ret;
id->adap.owner = THIS_MODULE;
id->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index f80d79e973cd..08f491ea21ac 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -280,6 +280,10 @@ static const struct property_entry bq24190_props[] = {
{ }
};
+static const struct software_node bq24190_node = {
+ .properties = bq24190_props,
+};
+
static struct regulator_consumer_supply fusb302_consumer = {
.supply = "vbus",
/* Must match fusb302 dev_name in intel_cht_int33fe.c */
@@ -308,7 +312,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
.type = "bq24190",
.addr = 0x6b,
.dev_name = "bq24190",
- .properties = bq24190_props,
+ .swnode = &bq24190_node,
.platform_data = &bq24190_pdata,
};
int ret, reg, irq;
diff --git a/drivers/i2c/busses/i2c-cp2615.c b/drivers/i2c/busses/i2c-cp2615.c
new file mode 100644
index 000000000000..78cfecd1ea76
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cp2615.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * i2c support for Silicon Labs' CP2615 Digital Audio Bridge
+ *
+ * (c) 2021, Bence Csókás <bence98@sch.bme.hu>
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/usb.h>
+
+/* CP2615 I/O Protocol implementation */
+
+#define CP2615_VID 0x10c4
+#define CP2615_PID 0xeac1
+
+#define IOP_EP_IN 0x82
+#define IOP_EP_OUT 0x02
+#define IOP_IFN 1
+#define IOP_ALTSETTING 2
+
+#define MAX_IOP_SIZE 64
+#define MAX_IOP_PAYLOAD_SIZE (MAX_IOP_SIZE - 6)
+#define MAX_I2C_SIZE (MAX_IOP_PAYLOAD_SIZE - 4)
+
+enum cp2615_iop_msg_type {
+ iop_GetAccessoryInfo = 0xD100,
+ iop_AccessoryInfo = 0xA100,
+ iop_GetPortConfiguration = 0xD203,
+ iop_PortConfiguration = 0xA203,
+ iop_DoI2cTransfer = 0xD400,
+ iop_I2cTransferResult = 0xA400,
+ iop_GetSerialState = 0xD501,
+ iop_SerialState = 0xA501
+};
+
+struct __packed cp2615_iop_msg {
+ __be16 preamble, length, msg;
+ u8 data[MAX_IOP_PAYLOAD_SIZE];
+};
+
+#define PART_ID_A01 0x1400
+#define PART_ID_A02 0x1500
+
+struct __packed cp2615_iop_accessory_info {
+ __be16 part_id, option_id, proto_ver;
+};
+
+struct __packed cp2615_i2c_transfer {
+ u8 tag, i2caddr, read_len, write_len;
+ u8 data[MAX_I2C_SIZE];
+};
+
+/* Possible values for struct cp2615_i2c_transfer_result.status */
+enum cp2615_i2c_status {
+ /* Writing to the internal EEPROM failed, because it is locked */
+ CP2615_CFG_LOCKED = -6,
+ /* read_len or write_len out of range */
+ CP2615_INVALID_PARAM = -4,
+ /* I2C slave did not ACK in time */
+ CP2615_TIMEOUT,
+ /* I2C bus busy */
+ CP2615_BUS_BUSY,
+ /* I2C bus error (i.e. the device NAK'd the request) */
+ CP2615_BUS_ERROR,
+ CP2615_SUCCESS
+};
+
+struct __packed cp2615_i2c_transfer_result {
+ u8 tag, i2caddr;
+ s8 status;
+ u8 read_len;
+ u8 data[MAX_I2C_SIZE];
+};
+
+static int cp2615_init_iop_msg(struct cp2615_iop_msg *ret, enum cp2615_iop_msg_type msg,
+ const void *data, size_t data_len)
+{
+ if (data_len > MAX_IOP_PAYLOAD_SIZE)
+ return -EFBIG;
+
+ if (!ret)
+ return -EINVAL;
+
+ ret->preamble = 0x2A2A;
+ ret->length = htons(data_len + 6);
+ ret->msg = htons(msg);
+ if (data && data_len)
+ memcpy(&ret->data, data, data_len);
+ return 0;
+}
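+
+/*
+ * A minimal sketch of the resulting wire format: a zero-payload
+ * iop_GetAccessoryInfo request built by cp2615_init_iop_msg()
+ * serializes to the six header bytes
+ *
+ *	2a 2a 00 06 d1 00
+ *
+ * i.e. the 0x2A2A preamble, the big-endian length (6, header
+ * included) and the big-endian message type.
+ */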
+
+static int cp2615_init_i2c_msg(struct cp2615_iop_msg *ret, const struct cp2615_i2c_transfer *data)
+{
+ return cp2615_init_iop_msg(ret, iop_DoI2cTransfer, data, 4 + data->write_len);
+}
+
+/* Translates CP2615 status codes to Linux errno values */
+static int cp2615_check_status(enum cp2615_i2c_status status)
+{
+ switch (status) {
+ case CP2615_SUCCESS:
+ return 0;
+ case CP2615_BUS_ERROR:
+ return -ENXIO;
+ case CP2615_BUS_BUSY:
+ return -EAGAIN;
+ case CP2615_TIMEOUT:
+ return -ETIMEDOUT;
+ case CP2615_INVALID_PARAM:
+ return -EINVAL;
+ case CP2615_CFG_LOCKED:
+ return -EPERM;
+ }
+ /* Unknown error code */
+ return -EPROTO;
+}
+
+/* Driver code */
+
+static int
+cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w)
+{
+ struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int res = cp2615_init_i2c_msg(msg, i2c_w);
+
+ if (!res)
+ res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT),
+ msg, ntohs(msg->length), NULL, 0);
+ kfree(msg);
+ return res;
+}
+
+static int
+cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf)
+{
+ struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ struct cp2615_i2c_transfer_result *i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
+ msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+
+ if (res < 0) {
+ kfree(msg);
+ return res;
+ }
+
+ if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) {
+ kfree(msg);
+ return -EIO;
+ }
+
+ res = cp2615_check_status(i2c_r->status);
+ if (!res)
+ memcpy(buf, &i2c_r->data, i2c_r->read_len);
+
+ kfree(msg);
+ return res;
+}
+
+/* Checks if the IOP is functional by querying the part's ID */
+static int cp2615_check_iop(struct usb_interface *usbif)
+{
+ struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ struct cp2615_iop_accessory_info *info = (struct cp2615_iop_accessory_info *)&msg->data;
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+ int res = cp2615_init_iop_msg(msg, iop_GetAccessoryInfo, NULL, 0);
+
+ if (res)
+ goto out;
+
+ res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT),
+ msg, ntohs(msg->length), NULL, 0);
+ if (res)
+ goto out;
+
+ res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
+ msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+ if (res)
+ goto out;
+
+ if (msg->msg != htons(iop_AccessoryInfo)) {
+ res = -EIO;
+ goto out;
+ }
+
+ switch (ntohs(info->part_id)) {
+ case PART_ID_A01:
+ dev_dbg(&usbif->dev, "Found A01 part. (WARNING: errata exists!)\n");
+ break;
+ case PART_ID_A02:
+ dev_dbg(&usbif->dev, "Found good A02 part.\n");
+ break;
+ default:
+ dev_warn(&usbif->dev, "Unknown part ID %04X\n", ntohs(info->part_id));
+ }
+
+out:
+ kfree(msg);
+ return res;
+}
+
+static int
+cp2615_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct usb_interface *usbif = adap->algo_data;
+ int i = 0, ret = 0;
+ struct i2c_msg *msg;
+ struct cp2615_i2c_transfer i2c_w = {0};
+
+ dev_dbg(&usbif->dev, "Doing %d I2C transactions\n", num);
+
+ for (; !ret && i < num; i++) {
+ msg = &msgs[i];
+
+ i2c_w.tag = 0xdd;
+ i2c_w.i2caddr = i2c_8bit_addr_from_msg(msg);
+ if (msg->flags & I2C_M_RD) {
+ i2c_w.read_len = msg->len;
+ i2c_w.write_len = 0;
+ } else {
+ i2c_w.read_len = 0;
+ i2c_w.write_len = msg->len;
+ memcpy(&i2c_w.data, msg->buf, i2c_w.write_len);
+ }
+ ret = cp2615_i2c_send(usbif, &i2c_w);
+ if (ret)
+ break;
+ ret = cp2615_i2c_recv(usbif, i2c_w.tag, msg->buf);
+ }
+ if (ret < 0)
+ return ret;
+ return i;
+}
+
+static u32
+cp2615_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm cp2615_i2c_algo = {
+ .master_xfer = cp2615_i2c_master_xfer,
+ .functionality = cp2615_i2c_func,
+};
+
+/*
+ * This chip has some limitations: for one, the USB endpoint
+ * can only receive 64 bytes per transfer, which leaves 54 bytes
+ * for the I2C payload. On top of that, EITHER read_len OR
+ * write_len may be zero, but not both. If both are non-zero, the
+ * adapter issues a write followed by a read, and the chip does
+ * not support a repeated START between the write and read phases.
+ */
+static struct i2c_adapter_quirks cp2615_i2c_quirks = {
+ .max_write_len = MAX_I2C_SIZE,
+ .max_read_len = MAX_I2C_SIZE,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
+ .max_comb_1st_msg_len = MAX_I2C_SIZE,
+ .max_comb_2nd_msg_len = MAX_I2C_SIZE
+};
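+
+/*
+ * A minimal usage sketch under these quirks (the client address 0x50
+ * is hypothetical, not part of this driver): a register read must be
+ * a single write-then-read pair, each phase at most MAX_I2C_SIZE
+ * bytes and neither of zero length.
+ *
+ *	static int example_read(struct i2c_adapter *adap, u8 reg,
+ *				u8 *val, u16 len)
+ *	{
+ *		struct i2c_msg msgs[2] = {
+ *			{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
+ *			{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = val },
+ *		};
+ *
+ *		return i2c_transfer(adap, msgs, 2);
+ *	}
+ */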
+
+static void
+cp2615_i2c_remove(struct usb_interface *usbif)
+{
+ struct i2c_adapter *adap = usb_get_intfdata(usbif);
+
+ usb_set_intfdata(usbif, NULL);
+ i2c_del_adapter(adap);
+}
+
+static int
+cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id)
+{
+ int ret = 0;
+ struct i2c_adapter *adap;
+ struct usb_device *usbdev = interface_to_usbdev(usbif);
+
+ ret = usb_set_interface(usbdev, IOP_IFN, IOP_ALTSETTING);
+ if (ret)
+ return ret;
+
+ ret = cp2615_check_iop(usbif);
+ if (ret)
+ return ret;
+
+ adap = devm_kzalloc(&usbif->dev, sizeof(struct i2c_adapter), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ strncpy(adap->name, usbdev->serial, sizeof(adap->name) - 1);
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = &usbif->dev;
+ adap->dev.of_node = usbif->dev.of_node;
+ adap->timeout = HZ;
+ adap->algo = &cp2615_i2c_algo;
+ adap->quirks = &cp2615_i2c_quirks;
+ adap->algo_data = usbif;
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ usb_set_intfdata(usbif, adap);
+ return 0;
+}
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(CP2615_VID, CP2615_PID, IOP_IFN) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver cp2615_i2c_driver = {
+ .name = "i2c-cp2615",
+ .probe = cp2615_i2c_probe,
+ .disconnect = cp2615_i2c_remove,
+ .id_table = id_table,
+};
+
+module_usb_driver(cp2615_i2c_driver);
+
+MODULE_AUTHOR("Bence Csókás <bence98@sch.bme.hu>");
+MODULE_DESCRIPTION("CP2615 I2C bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 3c19aada4b30..fdc34d9e3702 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -150,6 +150,9 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev)
reg = readl(dev->base + DW_IC_COMP_TYPE);
i2c_dw_release_lock(dev);
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
+ map_cfg.max_register = AMD_UCSI_INTR_REG;
+
if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) {
map_cfg.reg_read = dw_reg_read_swab;
map_cfg.reg_write = dw_reg_write_swab;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 5392b82f68a4..6a53f75abf7c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -295,8 +295,16 @@ struct dw_i2c_dev {
#define MODEL_MSCC_OCELOT BIT(8)
#define MODEL_BAIKAL_BT1 BIT(9)
+#define MODEL_AMD_NAVI_GPU BIT(10)
#define MODEL_MASK GENMASK(11, 8)
+/*
+ * Enable the UCSI interrupt by writing 0xd to the register at
+ * offset 0x474, as specified in the hardware specification.
+ */
+#define AMD_UCSI_INTR_REG 0x474
+#define AMD_UCSI_INTR_EN 0xd
+
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 873ef38eb1c8..13be1d678c39 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -23,6 +23,10 @@
#include "i2c-designware-core.h"
+#define AMD_TIMEOUT_MIN_US 25
+#define AMD_TIMEOUT_MAX_US 250
+#define AMD_MASTERCFG_MASK GENMASK(15, 0)
+
static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
/* Configure Tx/Rx FIFO threshold levels */
@@ -35,10 +39,10 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- const char *mode_str, *fp_str = "";
u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
+ const char *fp_str = "";
u32 ic_clk;
int ret;
@@ -78,7 +82,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
* difference is the timing parameter values since the registers are
* the same.
*/
- if (t->bus_freq_hz == 1000000) {
+ if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
/*
* Check are Fast Mode Plus parameters available. Calculate
* SCL timing parameters for Fast Mode Plus if not set.
@@ -154,22 +158,10 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
ret = i2c_dw_set_sda_hold(dev);
if (ret)
- goto out;
-
- switch (dev->master_cfg & DW_IC_CON_SPEED_MASK) {
- case DW_IC_CON_SPEED_STD:
- mode_str = "Standard Mode";
- break;
- case DW_IC_CON_SPEED_HIGH:
- mode_str = "High Speed Mode";
- break;
- default:
- mode_str = "Fast Mode";
- }
- dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str);
+ return ret;
-out:
- return ret;
+ dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
+ return 0;
}
/**
@@ -260,6 +252,108 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}
+static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
+ !(val & DW_IC_INTR_STOP_DET),
+ 1100, 20000);
+ if (ret)
+ dev_err(dev->dev, "i2c timeout error %d\n", ret);
+
+ return ret;
+}
+
+static int i2c_dw_status(struct dw_i2c_dev *dev)
+{
+ int status;
+
+ status = i2c_dw_wait_bus_not_busy(dev);
+ if (status)
+ return status;
+
+ return i2c_dw_check_stopbit(dev);
+}
+
+/*
+ * Initiate and continue the master read/write transaction with a
+ * polling-based transfer routine, writing the messages into the Tx
+ * buffer as it goes.
+ */
+static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
+ int cmd = 0, status;
+ u8 *tx_buf;
+ u32 val;
+
+ /*
+ * To enable the interrupt for UCSI, i.e. the AMD NAVI GPU card, the
+ * hardware IP specification mandates writing a specific value to the
+ * register at offset 0x474.
+ */
+ regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);
+
+ dev->msgs = msgs;
+ dev->msgs_num = num_msgs;
+ i2c_dw_xfer_init(dev);
+ i2c_dw_disable_int(dev);
+
+ /* Initiate messages read/write transaction */
+ for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
+ tx_buf = msgs[msg_wrt_idx].buf;
+ buf_len = msgs[msg_wrt_idx].len;
+
+ if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
+ regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
+ /*
+ * Initiate the I2C read/write transaction for the buffer length
+ * and poll the bus-busy status. For the last byte of the last
+ * message, update the command with the stop bit enabled.
+ */
+ for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
+ if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
+ cmd |= BIT(9);
+
+ if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
+ /* Due to a hardware bug, the same command must be written twice. */
+ regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
+ regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
+ if (cmd) {
+ regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
+ regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
+ /*
+ * The stop bit needs to be checked. However, it cannot be
+ * detected from the registers, so we always check it when
+ * reading/writing the last byte.
+ */
+ status = i2c_dw_status(dev);
+ if (status)
+ return status;
+
+ for (data_idx = 0; data_idx < buf_len; data_idx++) {
+ regmap_read(dev->map, DW_IC_DATA_CMD, &val);
+ tx_buf[data_idx] = val;
+ }
+ status = i2c_dw_check_stopbit(dev);
+ if (status)
+ return status;
+ }
+ } else {
+ regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
+ usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
+ }
+ }
+ status = i2c_dw_check_stopbit(dev);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
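+
+/*
+ * For reference on the magic values above (bit names illustrative,
+ * matching the DesignWare DATA_CMD layout): bit 8 (the 0x100 writes)
+ * turns a DATA_CMD entry into a read command, and bit 9 (the BIT(9)
+ * OR-ed into cmd) requests a STOP after that byte, which is why only
+ * the last byte of the last message sets it.
+ */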
+
/*
* Initiate (and continue) low level master read/write transaction.
* This function is only called from i2c_dw_isr, and pumping i2c_msg
@@ -463,6 +557,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
+ /*
+ * Use the polling-based transfer quirk when an AMD NAVI GPU card is
+ * present, as that hardware does not support the interrupt-based
+ * functionality of the existing DesignWare driver.
+ */
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+ ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
+ goto done_nolock;
+ }
+
if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
ret = -ESHUTDOWN;
goto done_nolock;
@@ -739,6 +843,20 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
return 0;
}
+static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
+{
+ struct i2c_adapter *adap = &dev->adapter;
+ int ret;
+
+ pm_runtime_get_noresume(dev->dev);
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret)
+ dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
+ pm_runtime_put_noidle(dev->dev);
+
+ return ret;
+}
+
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
@@ -775,6 +893,9 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
+ return amd_i2c_adap_quirk(dev);
+
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
irq_flags = IRQF_NO_SUSPEND;
} else {
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 55c83a7a24f3..0f409a4c2da0 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -26,6 +26,7 @@
#include "i2c-designware-core.h"
#define DRIVER_NAME "i2c-designware-pci"
+#define AMD_CLK_RATE_HZ 100000
enum dw_pci_ctl_id_t {
medfield,
@@ -34,6 +35,7 @@ enum dw_pci_ctl_id_t {
cherrytrail,
haswell,
elkhartlake,
+ navi_amd,
};
struct dw_scl_sda_cfg {
@@ -78,11 +80,23 @@ static struct dw_scl_sda_cfg hsw_config = {
.sda_hold = 0x9,
};
+/* NAVI-AMD HCNT/LCNT/SDA hold time */
+static struct dw_scl_sda_cfg navi_amd_config = {
+ .ss_hcnt = 0x1ae,
+ .ss_lcnt = 0x23a,
+ .sda_hold = 0x9,
+};
+
static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
return 25000;
}
+static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return AMD_CLK_RATE_HZ;
+}
+
static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
@@ -104,6 +118,35 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
return -ENODEV;
}
+ /*
+ * TODO: find a better way to deduplicate the instantiation of the
+ * USB PD slave device with the NVIDIA GPU driver.
+ */
+static int navi_amd_register_client(struct dw_i2c_dev *dev)
+{
+ struct i2c_board_info info;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strscpy(info.type, "ccgx-ucsi", I2C_NAME_SIZE);
+ info.addr = 0x08;
+ info.irq = dev->irq;
+
+ dev->slave = i2c_new_client_device(&dev->adapter, &info);
+ if (IS_ERR(dev->slave))
+ return PTR_ERR(dev->slave);
+
+ return 0;
+}
+
+static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+
+ dev->flags |= MODEL_AMD_NAVI_GPU;
+ dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ return 0;
+}
+
static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
/*
@@ -155,6 +198,12 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.bus_num = -1,
.get_clk_rate_khz = ehl_get_clk_rate_khz,
},
+ [navi_amd] = {
+ .bus_num = -1,
+ .scl_sda_cfg = &navi_amd_config,
+ .setup = navi_amd_setup,
+ .get_clk_rate_khz = navi_amd_get_clk_rate_khz,
+ },
};
#ifdef CONFIG_PM
@@ -274,6 +323,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return r;
}
+ if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+ r = navi_amd_register_client(dev);
+ if (r) {
+ dev_err(dev->dev, "register client failed with %d\n", r);
+ return r;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
@@ -337,6 +394,10 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4bbe), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bbf), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bc0), elkhartlake },
+ { PCI_VDEVICE(ATI, 0x7314), navi_amd },
+ { PCI_VDEVICE(ATI, 0x73a4), navi_amd },
+ { PCI_VDEVICE(ATI, 0x73e4), navi_amd },
+ { PCI_VDEVICE(ATI, 0x73c4), navi_amd },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index a08554c1a570..bdff0e6345d9 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
em_i2c_reset(&priv->adap);
- priv->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+ priv->irq = ret;
ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
new file mode 100644
index 000000000000..acf394812061
--- /dev/null
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HiSilicon I2C Controller Driver for Kunpeng SoC
+ *
+ * Copyright (c) 2021 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define HISI_I2C_FRAME_CTRL 0x0000
+#define HISI_I2C_FRAME_CTRL_SPEED_MODE GENMASK(1, 0)
+#define HISI_I2C_FRAME_CTRL_ADDR_TEN BIT(2)
+#define HISI_I2C_SLV_ADDR 0x0004
+#define HISI_I2C_SLV_ADDR_VAL GENMASK(9, 0)
+#define HISI_I2C_SLV_ADDR_GC_S_MODE BIT(10)
+#define HISI_I2C_SLV_ADDR_GC_S_EN BIT(11)
+#define HISI_I2C_CMD_TXDATA 0x0008
+#define HISI_I2C_CMD_TXDATA_DATA GENMASK(7, 0)
+#define HISI_I2C_CMD_TXDATA_RW BIT(8)
+#define HISI_I2C_CMD_TXDATA_P_EN BIT(9)
+#define HISI_I2C_CMD_TXDATA_SR_EN BIT(10)
+#define HISI_I2C_RXDATA 0x000c
+#define HISI_I2C_RXDATA_DATA GENMASK(7, 0)
+#define HISI_I2C_SS_SCL_HCNT 0x0010
+#define HISI_I2C_SS_SCL_LCNT 0x0014
+#define HISI_I2C_FS_SCL_HCNT 0x0018
+#define HISI_I2C_FS_SCL_LCNT 0x001c
+#define HISI_I2C_HS_SCL_HCNT 0x0020
+#define HISI_I2C_HS_SCL_LCNT 0x0024
+#define HISI_I2C_FIFO_CTRL 0x0028
+#define HISI_I2C_FIFO_RX_CLR BIT(0)
+#define HISI_I2C_FIFO_TX_CLR BIT(1)
+#define HISI_I2C_FIFO_RX_AF_THRESH GENMASK(7, 2)
+#define HISI_I2C_FIFO_TX_AE_THRESH GENMASK(13, 8)
+#define HISI_I2C_FIFO_STATE 0x002c
+#define HISI_I2C_FIFO_STATE_RX_RERR BIT(0)
+#define HISI_I2C_FIFO_STATE_RX_WERR BIT(1)
+#define HISI_I2C_FIFO_STATE_RX_EMPTY BIT(3)
+#define HISI_I2C_FIFO_STATE_TX_RERR BIT(6)
+#define HISI_I2C_FIFO_STATE_TX_WERR BIT(7)
+#define HISI_I2C_FIFO_STATE_TX_FULL BIT(11)
+#define HISI_I2C_SDA_HOLD 0x0030
+#define HISI_I2C_SDA_HOLD_TX GENMASK(15, 0)
+#define HISI_I2C_SDA_HOLD_RX GENMASK(23, 16)
+#define HISI_I2C_FS_SPK_LEN 0x0038
+#define HISI_I2C_FS_SPK_LEN_CNT GENMASK(7, 0)
+#define HISI_I2C_HS_SPK_LEN 0x003c
+#define HISI_I2C_HS_SPK_LEN_CNT GENMASK(7, 0)
+#define HISI_I2C_INT_MSTAT 0x0044
+#define HISI_I2C_INT_CLR 0x0048
+#define HISI_I2C_INT_MASK 0x004c
+#define HISI_I2C_TRANS_STATE 0x0050
+#define HISI_I2C_TRANS_ERR 0x0054
+#define HISI_I2C_VERSION 0x0058
+
+#define HISI_I2C_INT_ALL GENMASK(4, 0)
+#define HISI_I2C_INT_TRANS_CPLT BIT(0)
+#define HISI_I2C_INT_TRANS_ERR BIT(1)
+#define HISI_I2C_INT_FIFO_ERR BIT(2)
+#define HISI_I2C_INT_RX_FULL BIT(3)
+#define HISI_I2C_INT_TX_EMPTY BIT(4)
+#define HISI_I2C_INT_ERR \
+ (HISI_I2C_INT_TRANS_ERR | HISI_I2C_INT_FIFO_ERR)
+
+#define HISI_I2C_STD_SPEED_MODE 0
+#define HISI_I2C_FAST_SPEED_MODE 1
+#define HISI_I2C_HIGH_SPEED_MODE 2
+
+#define HISI_I2C_TX_FIFO_DEPTH 64
+#define HISI_I2C_RX_FIFO_DEPTH 64
+#define HISI_I2C_TX_F_AE_THRESH 1
+#define HISI_I2C_RX_F_AF_THRESH 60
+
+#define HZ_PER_KHZ 1000
+
+#define NSEC_TO_CYCLES(ns, clk_rate_khz) \
+ DIV_ROUND_UP_ULL((clk_rate_khz) * (ns), NSEC_PER_MSEC)
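+
+/*
+ * A worked example of the conversion above (the clock rate is assumed
+ * for illustration): with a 250000 kHz controller clock, a 50 ns
+ * digital filter width maps to DIV_ROUND_UP_ULL(250000 * 50, 1000000),
+ * i.e. 13 cycles.
+ */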
+
+struct hisi_i2c_controller {
+ struct i2c_adapter adapter;
+ void __iomem *iobase;
+ struct device *dev;
+ int irq;
+
+ /* Intermediate state recorded while a transfer is in flight */
+ struct completion *completion;
+ struct i2c_msg *msgs;
+ int msg_num;
+ int msg_tx_idx;
+ int buf_tx_idx;
+ int msg_rx_idx;
+ int buf_rx_idx;
+ u16 tar_addr;
+ u32 xfer_err;
+
+ /* I2C bus configuration */
+ struct i2c_timings t;
+ u32 clk_rate_khz;
+ u32 spk_len;
+};
+
+static void hisi_i2c_enable_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_MASK);
+}
+
+static void hisi_i2c_disable_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed((~mask) & HISI_I2C_INT_ALL, ctlr->iobase + HISI_I2C_INT_MASK);
+}
+
+static void hisi_i2c_clear_int(struct hisi_i2c_controller *ctlr, u32 mask)
+{
+ writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_CLR);
+}
+
+static void hisi_i2c_handle_errors(struct hisi_i2c_controller *ctlr)
+{
+ u32 int_err = ctlr->xfer_err, reg;
+
+ if (int_err & HISI_I2C_INT_FIFO_ERR) {
+ reg = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+
+ if (reg & HISI_I2C_FIFO_STATE_RX_RERR)
+ dev_err(ctlr->dev, "rx fifo error read\n");
+
+ if (reg & HISI_I2C_FIFO_STATE_RX_WERR)
+ dev_err(ctlr->dev, "rx fifo error write\n");
+
+ if (reg & HISI_I2C_FIFO_STATE_TX_RERR)
+ dev_err(ctlr->dev, "tx fifo error read\n");
+
+ if (reg & HISI_I2C_FIFO_STATE_TX_WERR)
+ dev_err(ctlr->dev, "tx fifo error write\n");
+ }
+}
+
+static int hisi_i2c_start_xfer(struct hisi_i2c_controller *ctlr)
+{
+ struct i2c_msg *msg = ctlr->msgs;
+ u32 reg;
+
+ reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL);
+ reg &= ~HISI_I2C_FRAME_CTRL_ADDR_TEN;
+ if (msg->flags & I2C_M_TEN)
+ reg |= HISI_I2C_FRAME_CTRL_ADDR_TEN;
+ writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL);
+
+ reg = readl(ctlr->iobase + HISI_I2C_SLV_ADDR);
+ reg &= ~HISI_I2C_SLV_ADDR_VAL;
+ reg |= FIELD_PREP(HISI_I2C_SLV_ADDR_VAL, msg->addr);
+ writel(reg, ctlr->iobase + HISI_I2C_SLV_ADDR);
+
+ reg = readl(ctlr->iobase + HISI_I2C_FIFO_CTRL);
+ reg |= HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR;
+ writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
+ reg &= ~(HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR);
+ writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
+
+ hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ hisi_i2c_enable_int(ctlr, HISI_I2C_INT_ALL);
+
+ return 0;
+}
+
+static void hisi_i2c_reset_xfer(struct hisi_i2c_controller *ctlr)
+{
+ ctlr->msg_num = 0;
+ ctlr->xfer_err = 0;
+ ctlr->msg_tx_idx = 0;
+ ctlr->msg_rx_idx = 0;
+ ctlr->buf_tx_idx = 0;
+ ctlr->buf_rx_idx = 0;
+}
+
+/*
+ * Initialize the transfer information and start the I2C bus transfer.
+ * We only configure the transfer and do some pre/post work here,
+ * then wait for the transfer to complete. The bulk of the transfer
+ * is performed in the IRQ handler.
+ */
+static int hisi_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct hisi_i2c_controller *ctlr = i2c_get_adapdata(adap);
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret = num;
+
+ hisi_i2c_reset_xfer(ctlr);
+ ctlr->completion = &done;
+ ctlr->msg_num = num;
+ ctlr->msgs = msgs;
+
+ hisi_i2c_start_xfer(ctlr);
+
+ if (!wait_for_completion_timeout(ctlr->completion, adap->timeout)) {
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ synchronize_irq(ctlr->irq);
+ i2c_recover_bus(&ctlr->adapter);
+ dev_err(ctlr->dev, "bus transfer timeout\n");
+ ret = -EIO;
+ }
+
+ if (ctlr->xfer_err) {
+ hisi_i2c_handle_errors(ctlr);
+ ret = -EIO;
+ }
+
+ hisi_i2c_reset_xfer(ctlr);
+ ctlr->completion = NULL;
+
+ return ret;
+}
+
+static u32 hisi_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm hisi_i2c_algo = {
+ .master_xfer = hisi_i2c_master_xfer,
+ .functionality = hisi_i2c_functionality,
+};
+
+static int hisi_i2c_read_rx_fifo(struct hisi_i2c_controller *ctlr)
+{
+ struct i2c_msg *cur_msg;
+ u32 fifo_state;
+
+ while (ctlr->msg_rx_idx < ctlr->msg_num) {
+ cur_msg = ctlr->msgs + ctlr->msg_rx_idx;
+
+ if (!(cur_msg->flags & I2C_M_RD)) {
+ ctlr->msg_rx_idx++;
+ continue;
+ }
+
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ while (!(fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY) &&
+ ctlr->buf_rx_idx < cur_msg->len) {
+ cur_msg->buf[ctlr->buf_rx_idx++] = readl(ctlr->iobase + HISI_I2C_RXDATA);
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ }
+
+ if (ctlr->buf_rx_idx == cur_msg->len) {
+ ctlr->buf_rx_idx = 0;
+ ctlr->msg_rx_idx++;
+ }
+
+ if (fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY)
+ break;
+ }
+
+ return 0;
+}
+
+static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
+{
+ int max_write = HISI_I2C_TX_FIFO_DEPTH;
+ bool need_restart = false, last_msg;
+ struct i2c_msg *cur_msg;
+ u32 cmd, fifo_state;
+
+ while (ctlr->msg_tx_idx < ctlr->msg_num) {
+ cur_msg = ctlr->msgs + ctlr->msg_tx_idx;
+ last_msg = (ctlr->msg_tx_idx == ctlr->msg_num - 1);
+
+ /* Signal the SR bit when we start transferring a new message */
+ if (ctlr->msg_tx_idx && !ctlr->buf_tx_idx)
+ need_restart = true;
+
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ while (!(fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) &&
+ ctlr->buf_tx_idx < cur_msg->len && max_write) {
+ cmd = 0;
+
+ if (need_restart) {
+ cmd |= HISI_I2C_CMD_TXDATA_SR_EN;
+ need_restart = false;
+ }
+
+ /* Signal the STOP bit at the last frame of the last message */
+ if (ctlr->buf_tx_idx == cur_msg->len - 1 && last_msg)
+ cmd |= HISI_I2C_CMD_TXDATA_P_EN;
+
+ if (cur_msg->flags & I2C_M_RD)
+ cmd |= HISI_I2C_CMD_TXDATA_RW;
+ else
+ cmd |= FIELD_PREP(HISI_I2C_CMD_TXDATA_DATA,
+ cur_msg->buf[ctlr->buf_tx_idx]);
+
+ writel(cmd, ctlr->iobase + HISI_I2C_CMD_TXDATA);
+ ctlr->buf_tx_idx++;
+ max_write--;
+
+ fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE);
+ }
+
+ /* Update the transfer index after each message transfer completes. */
+ if (ctlr->buf_tx_idx == cur_msg->len) {
+ ctlr->buf_tx_idx = 0;
+ ctlr->msg_tx_idx++;
+ }
+
+ if ((fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) ||
+ max_write == 0)
+ break;
+ }
+}
+
+static irqreturn_t hisi_i2c_irq(int irq, void *context)
+{
+ struct hisi_i2c_controller *ctlr = context;
+ u32 int_stat;
+
+ int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT);
+ hisi_i2c_clear_int(ctlr, int_stat);
+ if (!(int_stat & HISI_I2C_INT_ALL))
+ return IRQ_NONE;
+
+ if (int_stat & HISI_I2C_INT_TX_EMPTY)
+ hisi_i2c_xfer_msg(ctlr);
+
+ if (int_stat & HISI_I2C_INT_ERR) {
+ ctlr->xfer_err = int_stat;
+ goto out;
+ }
+
+ /* Drain the rx fifo before finishing the transfer */
+ if (int_stat & (HISI_I2C_INT_TRANS_CPLT | HISI_I2C_INT_RX_FULL))
+ hisi_i2c_read_rx_fifo(ctlr);
+
+out:
+ if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ complete(ctlr->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Helper function for calculating and configuring the HIGH and LOW
+ * periods of the SCL clock. The caller passes the ratio of the
+ * counts (divide / divisor) for the target speed mode, along with
+ * the target registers.
+ */
+static void hisi_i2c_set_scl(struct hisi_i2c_controller *ctlr,
+ u32 divide, u32 divisor,
+ u32 reg_hcnt, u32 reg_lcnt)
+{
+ u32 total_cnt, t_scl_hcnt, t_scl_lcnt, scl_fall_cnt, scl_rise_cnt;
+ u32 scl_hcnt, scl_lcnt;
+
+ /* Total SCL clock cycles per speed period */
+ total_cnt = DIV_ROUND_UP_ULL(ctlr->clk_rate_khz * HZ_PER_KHZ, ctlr->t.bus_freq_hz);
+ /* Total HIGH level SCL clock cycles including edges */
+ t_scl_hcnt = DIV_ROUND_UP_ULL(total_cnt * divide, divisor);
+ /* Total LOW level SCL clock cycles including edges */
+ t_scl_lcnt = total_cnt - t_scl_hcnt;
+ /* Fall edge SCL clock cycles */
+ scl_fall_cnt = NSEC_TO_CYCLES(ctlr->t.scl_fall_ns, ctlr->clk_rate_khz);
+ /* Rise edge SCL clock cycles */
+ scl_rise_cnt = NSEC_TO_CYCLES(ctlr->t.scl_rise_ns, ctlr->clk_rate_khz);
+
+ /* Calculated HIGH and LOW periods of SCL clock */
+ scl_hcnt = t_scl_hcnt - ctlr->spk_len - 7 - scl_fall_cnt;
+ scl_lcnt = t_scl_lcnt - 1 - scl_rise_cnt;
+
+ writel(scl_hcnt, ctlr->iobase + reg_hcnt);
+ writel(scl_lcnt, ctlr->iobase + reg_lcnt);
+}
+
+static void hisi_i2c_configure_bus(struct hisi_i2c_controller *ctlr)
+{
+ u32 reg, sda_hold_cnt, speed_mode;
+
+ i2c_parse_fw_timings(ctlr->dev, &ctlr->t, true);
+ ctlr->spk_len = NSEC_TO_CYCLES(ctlr->t.digital_filter_width_ns, ctlr->clk_rate_khz);
+
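+ /* Select the speed mode and the HIGH/LOW count ratio for the requested bus frequency */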
+ switch (ctlr->t.bus_freq_hz) {
+ case I2C_MAX_FAST_MODE_FREQ:
+ speed_mode = HISI_I2C_FAST_SPEED_MODE;
+ hisi_i2c_set_scl(ctlr, 26, 76, HISI_I2C_FS_SCL_HCNT, HISI_I2C_FS_SCL_LCNT);
+ break;
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
+ speed_mode = HISI_I2C_HIGH_SPEED_MODE;
+ hisi_i2c_set_scl(ctlr, 6, 22, HISI_I2C_HS_SCL_HCNT, HISI_I2C_HS_SCL_LCNT);
+ break;
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ default:
+ speed_mode = HISI_I2C_STD_SPEED_MODE;
+
+ /* In the default case, force the bus speed to standard mode. */
+ ctlr->t.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ hisi_i2c_set_scl(ctlr, 40, 87, HISI_I2C_SS_SCL_HCNT, HISI_I2C_SS_SCL_LCNT);
+ break;
+ }
+
+ reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL);
+ reg &= ~HISI_I2C_FRAME_CTRL_SPEED_MODE;
+ reg |= FIELD_PREP(HISI_I2C_FRAME_CTRL_SPEED_MODE, speed_mode);
+ writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL);
+
+ sda_hold_cnt = NSEC_TO_CYCLES(ctlr->t.sda_hold_ns, ctlr->clk_rate_khz);
+
+ reg = FIELD_PREP(HISI_I2C_SDA_HOLD_TX, sda_hold_cnt);
+ writel(reg, ctlr->iobase + HISI_I2C_SDA_HOLD);
+
+ writel(ctlr->spk_len, ctlr->iobase + HISI_I2C_FS_SPK_LEN);
+
+ reg = FIELD_PREP(HISI_I2C_FIFO_RX_AF_THRESH, HISI_I2C_RX_F_AF_THRESH);
+ reg |= FIELD_PREP(HISI_I2C_FIFO_TX_AE_THRESH, HISI_I2C_TX_F_AE_THRESH);
+ writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL);
+}
+
+static int hisi_i2c_probe(struct platform_device *pdev)
+{
+ struct hisi_i2c_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct i2c_adapter *adapter;
+ u64 clk_rate_hz;
+ u32 hw_version;
+ int ret;
+
+ ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctlr->iobase))
+ return PTR_ERR(ctlr->iobase);
+
+ ctlr->irq = platform_get_irq(pdev, 0);
+ if (ctlr->irq < 0)
+ return ctlr->irq;
+
+ ctlr->dev = dev;
+
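+ /* Mask all interrupts before installing the handler */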
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+
+ ret = devm_request_irq(dev, ctlr->irq, hisi_i2c_irq, 0, "hisi-i2c", ctlr);
+ if (ret) {
+ dev_err(dev, "failed to request irq handler, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz);
+ if (ret) {
+ dev_err(dev, "failed to get clock frequency, ret = %d\n", ret);
+ return ret;
+ }
+
+ ctlr->clk_rate_khz = DIV_ROUND_UP_ULL(clk_rate_hz, HZ_PER_KHZ);
+
+ hisi_i2c_configure_bus(ctlr);
+
+ adapter = &ctlr->adapter;
+ snprintf(adapter->name, sizeof(adapter->name),
+ "HiSilicon I2C Controller %s", dev_name(dev));
+ adapter->owner = THIS_MODULE;
+ adapter->algo = &hisi_i2c_algo;
+ adapter->dev.parent = dev;
+ i2c_set_adapdata(adapter, ctlr);
+
+ ret = devm_i2c_add_adapter(dev, adapter);
+ if (ret)
+ return ret;
+
+ hw_version = readl(ctlr->iobase + HISI_I2C_VERSION);
+ dev_info(ctlr->dev, "speed mode is %s. hw version 0x%x\n",
+ i2c_freq_mode_string(ctlr->t.bus_freq_hz), hw_version);
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_i2c_acpi_ids[] = {
+ { "HISI03D1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_i2c_acpi_ids);
+
+static struct platform_driver hisi_i2c_driver = {
+ .probe = hisi_i2c_probe,
+ .driver = {
+ .name = "hisi-i2c",
+ .acpi_match_table = hisi_i2c_acpi_ids,
+ },
+};
+module_platform_driver(hisi_i2c_driver);
+
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon I2C Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4acee6f9e5a3..99d446763530 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -73,6 +73,7 @@
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
* Alder Lake-S (PCH) 0x7aa3 32 hard yes yes yes
* Alder Lake-P (PCH) 0x51a3 32 hard yes yes yes
+ * Alder Lake-M (PCH) 0x54a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -230,6 +231,7 @@
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
{ 0, }
};
@@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 66c9923fc766..c8c422e9dda4 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -54,7 +54,6 @@ struct icy_i2c {
void __iomem *reg_s0;
void __iomem *reg_s1;
- struct fwnode_handle *ltc2990_fwnode;
struct i2c_client *ltc2990_client;
};
@@ -115,6 +114,10 @@ static const struct property_entry icy_ltc2990_props[] = {
{ }
};
+static const struct software_node icy_ltc2990_node = {
+ .properties = icy_ltc2990_props,
+};
+
static int icy_probe(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
@@ -123,6 +126,7 @@ static int icy_probe(struct zorro_dev *z,
struct fwnode_handle *new_fwnode;
struct i2c_board_info ltc2990_info = {
.type = "ltc2990",
+ .swnode = &icy_ltc2990_node,
};
i2c = devm_kzalloc(&z->dev, sizeof(*i2c), GFP_KERNEL);
@@ -174,26 +178,10 @@ static int icy_probe(struct zorro_dev *z,
*
* See property_entry above for in1, in2, temp3.
*/
- new_fwnode = fwnode_create_software_node(icy_ltc2990_props, NULL);
- if (IS_ERR(new_fwnode)) {
- dev_info(&z->dev, "Failed to create fwnode for LTC2990, error: %ld\n",
- PTR_ERR(new_fwnode));
- } else {
- /*
- * Store the fwnode so we can destroy it on .remove().
- * Only store it on success, as fwnode_remove_software_node()
- * is NULL safe, but not PTR_ERR safe.
- */
- i2c->ltc2990_fwnode = new_fwnode;
- ltc2990_info.fwnode = new_fwnode;
-
- i2c->ltc2990_client =
- i2c_new_scanned_device(&i2c->adapter,
- &ltc2990_info,
- icy_ltc2990_addresses,
- NULL);
- }
-
+ i2c->ltc2990_client = i2c_new_scanned_device(&i2c->adapter,
+ &ltc2990_info,
+ icy_ltc2990_addresses,
+ NULL);
return 0;
}
@@ -202,8 +190,6 @@ static void icy_remove(struct zorro_dev *z)
struct icy_i2c *i2c = dev_get_drvdata(&z->dev);
i2c_unregister_device(i2c->ltc2990_client);
- fwnode_remove_software_node(i2c->ltc2990_fwnode);
-
i2c_del_adapter(&i2c->adapter);
}
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 98a89301ed2a..8e987945ed45 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
atomic = true;
}
- ret = pm_runtime_get_sync(adap->dev.parent);
+ ret = pm_runtime_resume_and_get(adap->dev.parent);
if (ret < 0)
return ret;
@@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
u32 rev;
int ret;
- ret = pm_runtime_get_sync(i2c->adap.dev.parent);
+ ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 9db6ccded5e9..8b9ba055c418 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
unsigned int temp;
int ret;
- ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
+ ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b80fdc1f0092..dc5ca71906db 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
i2c_imx->last_slave_event = I2C_SLAVE_STOP;
/* Resume */
- ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
if (ret < 0) {
dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
return ret;
@@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
int result;
- result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
if (result < 0)
return result;
@@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 2f8b8050a223..cfecaf18ccbb 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -125,10 +125,12 @@ iop3xx_i2c_error(u32 sr)
int rc = 0;
if ((sr & IOP3XX_ISR_BERRD)) {
- if ( !rc ) rc = -I2C_ERR_BERR;
+ if (!rc)
+ rc = -I2C_ERR_BERR;
}
if ((sr & IOP3XX_ISR_ALD)) {
- if ( !rc ) rc = -I2C_ERR_ALD;
+ if (!rc)
+ rc = -I2C_ERR_ALD;
}
return rc;
}
@@ -151,12 +153,12 @@ iop3xx_i2c_get_srstat(struct i2c_algo_iop3xx_data *iop3xx_adap)
* sleep until interrupted, then recover and analyse the SR
* saved by handler
*/
-typedef int (* compare_func)(unsigned test, unsigned mask);
+typedef int (*compare_func)(unsigned test, unsigned mask);
/* returns 1 on correct comparison */
static int
iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap,
- unsigned flags, unsigned* status,
+ unsigned flags, unsigned *status,
compare_func compare)
{
unsigned sr = 0;
@@ -167,7 +169,7 @@ iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap,
do {
interrupted = wait_event_interruptible_timeout (
iop3xx_adap->waitq,
- (done = compare( sr = iop3xx_i2c_get_srstat(iop3xx_adap) ,flags )),
+ (done = compare(sr = iop3xx_i2c_get_srstat(iop3xx_adap), flags)),
1 * HZ
);
if ((rc = iop3xx_i2c_error(sr)) < 0) {
@@ -177,7 +179,7 @@ iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap,
*status = sr;
return -ETIMEDOUT;
}
- } while(!done);
+ } while (!done);
*status = sr;
@@ -204,7 +206,7 @@ iop3xx_i2c_wait_tx_done(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status)
{
return iop3xx_i2c_wait_event(
iop3xx_adap,
- IOP3XX_ISR_TXEMPTY | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD,
+ IOP3XX_ISR_TXEMPTY | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD,
status, any_bits_set);
}
@@ -226,7 +228,7 @@ iop3xx_i2c_wait_idle(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status)
static int
iop3xx_i2c_send_target_addr(struct i2c_algo_iop3xx_data *iop3xx_adap,
- struct i2c_msg* msg)
+ struct i2c_msg *msg)
{
unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET);
int status;
@@ -273,7 +275,7 @@ iop3xx_i2c_write_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char byte,
}
static int
-iop3xx_i2c_read_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char* byte,
+iop3xx_i2c_read_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char *byte,
int stop)
{
unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET);
@@ -305,7 +307,7 @@ iop3xx_i2c_writebytes(struct i2c_adapter *i2c_adap, const char *buf, int count)
int rc = 0;
for (ii = 0; rc == 0 && ii != count; ++ii)
- rc = iop3xx_i2c_write_byte(iop3xx_adap, buf[ii], ii==count-1);
+ rc = iop3xx_i2c_write_byte(iop3xx_adap, buf[ii], ii == count-1);
return rc;
}
@@ -317,7 +319,7 @@ iop3xx_i2c_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count)
int rc = 0;
for (ii = 0; rc == 0 && ii != count; ++ii)
- rc = iop3xx_i2c_read_byte(iop3xx_adap, &buf[ii], ii==count-1);
+ rc = iop3xx_i2c_read_byte(iop3xx_adap, &buf[ii], ii == count-1);
return rc;
}
@@ -330,7 +332,7 @@ iop3xx_i2c_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count)
* condition.
*/
static int
-iop3xx_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg* pmsg)
+iop3xx_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg *pmsg)
{
struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data;
int rc;
@@ -369,7 +371,7 @@ iop3xx_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
iop3xx_i2c_transaction_cleanup(iop3xx_adap);
- if(ret)
+ if (ret)
return ret;
return im;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 55177eb21d7b..baa7319eee53 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err;
+ i2c->irq = ret;
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret)
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 2fb0532d8a16..8716032f030a 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -172,12 +172,6 @@
#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
-enum {
- MLXBF_I2C_TIMING_100KHZ = 100000,
- MLXBF_I2C_TIMING_400KHZ = 400000,
- MLXBF_I2C_TIMING_1000KHZ = 1000000,
-};
-
/*
* Defines SMBus operating frequency and core clock frequency.
* According to ADB files, default values are compliant with 100KHz SMBus
@@ -1202,7 +1196,7 @@ static int mlxbf_i2c_init_timings(struct platform_device *pdev,
ret = device_property_read_u32(dev, "clock-frequency", &config_khz);
if (ret < 0)
- config_khz = MLXBF_I2C_TIMING_100KHZ;
+ config_khz = I2C_MAX_STANDARD_MODE_FREQ;
switch (config_khz) {
default:
@@ -1210,15 +1204,15 @@ static int mlxbf_i2c_init_timings(struct platform_device *pdev,
pr_warn("Illegal value %d: defaulting to 100 KHz\n",
config_khz);
fallthrough;
- case MLXBF_I2C_TIMING_100KHZ:
+ case I2C_MAX_STANDARD_MODE_FREQ:
config_idx = MLXBF_I2C_TIMING_CONFIG_100KHZ;
break;
- case MLXBF_I2C_TIMING_400KHZ:
+ case I2C_MAX_FAST_MODE_FREQ:
config_idx = MLXBF_I2C_TIMING_CONFIG_400KHZ;
break;
- case MLXBF_I2C_TIMING_1000KHZ:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
config_idx = MLXBF_I2C_TIMING_CONFIG_1000KHZ;
break;
}
@@ -2376,6 +2370,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
mlxbf_i2c_init_slave(pdev, priv);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
dev_name(dev), priv);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index d94f05c8b8b7..30d9e89a3db2 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -1,16 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * (C) Copyright 2003-2004
- * Humboldt Solutions Ltd, adrian@humboldt.co.uk.
-
* This is a combined i2c adapter and algorithm driver for the
* MPC107/Tsi107 PowerPC northbridge and processors that include
* the same I2C unit (8240, 8245, 85xx).
*
- * Release 0.8
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
+ * Copyright (C) 2003-2004 Humboldt Solutions Ltd, adrian@humboldt.co.uk
+ * Copyright (C) 2021 Allied Telesis Labs
*/
#include <linux/kernel.h>
@@ -19,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -58,18 +54,50 @@
#define CSR_MIF 0x02
#define CSR_RXAK 0x01
+enum mpc_i2c_action {
+ MPC_I2C_ACTION_START = 1,
+ MPC_I2C_ACTION_RESTART,
+ MPC_I2C_ACTION_READ_BEGIN,
+ MPC_I2C_ACTION_READ_BYTE,
+ MPC_I2C_ACTION_WRITE,
+ MPC_I2C_ACTION_STOP,
+
+ __MPC_I2C_ACTION_CNT
+};
+
+static const char * const action_str[] = {
+ "invalid",
+ "start",
+ "restart",
+ "read begin",
+ "read",
+ "write",
+ "stop",
+};
+
+static_assert(ARRAY_SIZE(action_str) == __MPC_I2C_ACTION_CNT);
+
struct mpc_i2c {
struct device *dev;
void __iomem *base;
u32 interrupt;
- wait_queue_head_t queue;
+ wait_queue_head_t waitq;
+ spinlock_t lock;
struct i2c_adapter adap;
int irq;
u32 real_clk;
-#ifdef CONFIG_PM_SLEEP
u8 fdr, dfsrr;
-#endif
struct clk *clk_per;
+ u32 cntl_bits;
+ enum mpc_i2c_action action;
+ struct i2c_msg *msgs;
+ int num_msgs;
+ int curr_msg;
+ u32 byte_posn;
+ u32 block;
+ int rc;
+ int expect_rxack;
};
struct mpc_i2c_divider {
@@ -86,19 +114,6 @@ static inline void writeccr(struct mpc_i2c *i2c, u32 x)
writeb(x, i2c->base + MPC_I2C_CR);
}
-static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
-{
- struct mpc_i2c *i2c = dev_id;
- if (readb(i2c->base + MPC_I2C_SR) & CSR_MIF) {
- /* Read again to allow register to stabilise */
- i2c->interrupt = readb(i2c->base + MPC_I2C_SR);
- writeb(0, i2c->base + MPC_I2C_SR);
- wake_up(&i2c->queue);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
-
/* Sometimes 9th clock pulse isn't generated, and slave doesn't release
* the bus, because it wants to send ACK.
* Following sequence of enabling/disabling and sending start/stop generates
@@ -121,61 +136,6 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
}
}
-static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
-{
- unsigned long orig_jiffies = jiffies;
- u32 cmd_err;
- int result = 0;
-
- if (!i2c->irq) {
- while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
- schedule();
- if (time_after(jiffies, orig_jiffies + timeout)) {
- dev_dbg(i2c->dev, "timeout\n");
- writeccr(i2c, 0);
- result = -ETIMEDOUT;
- break;
- }
- }
- cmd_err = readb(i2c->base + MPC_I2C_SR);
- writeb(0, i2c->base + MPC_I2C_SR);
- } else {
- /* Interrupt mode */
- result = wait_event_timeout(i2c->queue,
- (i2c->interrupt & CSR_MIF), timeout);
-
- if (unlikely(!(i2c->interrupt & CSR_MIF))) {
- dev_dbg(i2c->dev, "wait timeout\n");
- writeccr(i2c, 0);
- result = -ETIMEDOUT;
- }
-
- cmd_err = i2c->interrupt;
- i2c->interrupt = 0;
- }
-
- if (result < 0)
- return result;
-
- if (!(cmd_err & CSR_MCF)) {
- dev_dbg(i2c->dev, "unfinished\n");
- return -EIO;
- }
-
- if (cmd_err & CSR_MAL) {
- dev_dbg(i2c->dev, "MAL\n");
- return -EAGAIN;
- }
-
- if (writing && (cmd_err & CSR_RXAK)) {
- dev_dbg(i2c->dev, "No RXAK\n");
- /* generate stop */
- writeccr(i2c, CCR_MEN);
- return -ENXIO;
- }
- return 0;
-}
-
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
@@ -415,7 +375,7 @@ static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
}
*real_clk = fsl_get_sys_freq() / prescaler / div->divider;
- return div ? (int)div->fdr : -EINVAL;
+ return (int)div->fdr;
}
static void mpc_i2c_setup_8xxx(struct device_node *node,
@@ -450,168 +410,209 @@ static void mpc_i2c_setup_8xxx(struct device_node *node,
}
#endif /* CONFIG_FSL_SOC */
-static void mpc_i2c_start(struct mpc_i2c *i2c)
+static void mpc_i2c_finish(struct mpc_i2c *i2c, int rc)
{
- /* Clear arbitration */
- writeb(0, i2c->base + MPC_I2C_SR);
- /* Start with MEN */
- writeccr(i2c, CCR_MEN);
+ i2c->rc = rc;
+ i2c->block = 0;
+ i2c->cntl_bits = CCR_MEN;
+ writeccr(i2c, i2c->cntl_bits);
+ wake_up(&i2c->waitq);
}
-static void mpc_i2c_stop(struct mpc_i2c *i2c)
+static void mpc_i2c_do_action(struct mpc_i2c *i2c)
{
- writeccr(i2c, CCR_MEN);
-}
+ struct i2c_msg *msg = &i2c->msgs[i2c->curr_msg];
+ int dir = 0;
+ int recv_len = 0;
+ u8 byte;
+
+ dev_dbg(i2c->dev, "action = %s\n", action_str[i2c->action]);
+
+ i2c->cntl_bits &= ~(CCR_RSTA | CCR_MTX | CCR_TXAK);
+
+ if (msg->flags & I2C_M_RD)
+ dir = 1;
+ if (msg->flags & I2C_M_RECV_LEN)
+ recv_len = 1;
+
+ switch (i2c->action) {
+ case MPC_I2C_ACTION_RESTART:
+ i2c->cntl_bits |= CCR_RSTA;
+ fallthrough;
+
+ case MPC_I2C_ACTION_START:
+ i2c->cntl_bits |= CCR_MSTA | CCR_MTX;
+ writeccr(i2c, i2c->cntl_bits);
+ writeb((msg->addr << 1) | dir, i2c->base + MPC_I2C_DR);
+ i2c->expect_rxack = 1;
+ i2c->action = dir ? MPC_I2C_ACTION_READ_BEGIN : MPC_I2C_ACTION_WRITE;
+ break;
+
+ case MPC_I2C_ACTION_READ_BEGIN:
+ if (msg->len) {
+ if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
+ i2c->cntl_bits |= CCR_TXAK;
+
+ writeccr(i2c, i2c->cntl_bits);
+ /* Dummy read */
+ readb(i2c->base + MPC_I2C_DR);
+ }
+ i2c->action = MPC_I2C_ACTION_READ_BYTE;
+ break;
+
+ case MPC_I2C_ACTION_READ_BYTE:
+ if (i2c->byte_posn || !recv_len) {
+ /* Generate Tx ACK on next to last byte */
+ if (i2c->byte_posn == msg->len - 2)
+ i2c->cntl_bits |= CCR_TXAK;
+ /* Do not generate stop on last byte */
+ if (i2c->byte_posn == msg->len - 1)
+ i2c->cntl_bits |= CCR_MTX;
-static int mpc_write(struct mpc_i2c *i2c, int target,
- const u8 *data, int length, int restart)
-{
- int i, result;
- unsigned timeout = i2c->adap.timeout;
- u32 flags = restart ? CCR_RSTA : 0;
+ writeccr(i2c, i2c->cntl_bits);
+ }
- /* Start as master */
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
- /* Write target byte */
- writeb((target << 1), i2c->base + MPC_I2C_DR);
+ byte = readb(i2c->base + MPC_I2C_DR);
- result = i2c_wait(i2c, timeout, 1);
- if (result < 0)
- return result;
+ if (i2c->byte_posn == 0 && recv_len) {
+ if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) {
+ mpc_i2c_finish(i2c, -EPROTO);
+ return;
+ }
+ msg->len += byte;
+ /*
+ * For block reads, generate Tx ACK here if data length
+ * is 1 byte (total length is 2 bytes).
+ */
+ if (msg->len == 2) {
+ i2c->cntl_bits |= CCR_TXAK;
+ writeccr(i2c, i2c->cntl_bits);
+ }
+ }
+
+ dev_dbg(i2c->dev, "%s %02x\n", action_str[i2c->action], byte);
+ msg->buf[i2c->byte_posn++] = byte;
+ break;
+
+ case MPC_I2C_ACTION_WRITE:
+ dev_dbg(i2c->dev, "%s %02x\n", action_str[i2c->action],
+ msg->buf[i2c->byte_posn]);
+ writeb(msg->buf[i2c->byte_posn++], i2c->base + MPC_I2C_DR);
+ i2c->expect_rxack = 1;
+ break;
- for (i = 0; i < length; i++) {
- /* Write data byte */
- writeb(data[i], i2c->base + MPC_I2C_DR);
+ case MPC_I2C_ACTION_STOP:
+ mpc_i2c_finish(i2c, 0);
+ break;
- result = i2c_wait(i2c, timeout, 1);
- if (result < 0)
- return result;
+ default:
+ WARN(1, "Unexpected action %d\n", i2c->action);
+ break;
}
- return 0;
+ if (msg->len == i2c->byte_posn) {
+ i2c->curr_msg++;
+ i2c->byte_posn = 0;
+
+ if (i2c->curr_msg == i2c->num_msgs) {
+ i2c->action = MPC_I2C_ACTION_STOP;
+ /*
+ * We don't get another interrupt on read so
+ * finish the transfer now
+ */
+ if (dir)
+ mpc_i2c_finish(i2c, 0);
+ } else {
+ i2c->action = MPC_I2C_ACTION_RESTART;
+ }
+ }
}
-static int mpc_read(struct mpc_i2c *i2c, int target,
- u8 *data, int length, int restart, bool recv_len)
+static void mpc_i2c_do_intr(struct mpc_i2c *i2c, u8 status)
{
- unsigned timeout = i2c->adap.timeout;
- int i, result;
- u32 flags = restart ? CCR_RSTA : 0;
+ spin_lock(&i2c->lock);
- /* Switch to read - restart */
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
- /* Write target address byte - this time with the read flag set */
- writeb((target << 1) | 1, i2c->base + MPC_I2C_DR);
+ if (!(status & CSR_MCF)) {
+ dev_dbg(i2c->dev, "unfinished\n");
+ mpc_i2c_finish(i2c, -EIO);
+ goto out;
+ }
- result = i2c_wait(i2c, timeout, 1);
- if (result < 0)
- return result;
+ if (status & CSR_MAL) {
+ dev_dbg(i2c->dev, "arbitration lost\n");
+ mpc_i2c_finish(i2c, -EAGAIN);
+ goto out;
+ }
- if (length) {
- if (length == 1 && !recv_len)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK);
- else
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA);
- /* Dummy read */
- readb(i2c->base + MPC_I2C_DR);
+ if (i2c->expect_rxack && (status & CSR_RXAK)) {
+ dev_dbg(i2c->dev, "no Rx ACK\n");
+ mpc_i2c_finish(i2c, -ENXIO);
+ goto out;
}
+ i2c->expect_rxack = 0;
- for (i = 0; i < length; i++) {
- u8 byte;
-
- result = i2c_wait(i2c, timeout, 0);
- if (result < 0)
- return result;
-
- /*
- * For block reads, we have to know the total length (1st byte)
- * before we can determine if we are done.
- */
- if (i || !recv_len) {
- /* Generate txack on next to last byte */
- if (i == length - 2)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
- | CCR_TXAK);
- /* Do not generate stop on last byte */
- if (i == length - 1)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
- | CCR_MTX);
- }
+ mpc_i2c_do_action(i2c);
- byte = readb(i2c->base + MPC_I2C_DR);
+out:
+ spin_unlock(&i2c->lock);
+}
- /*
- * Adjust length if first received byte is length.
- * The length is 1 length byte plus actually data length
- */
- if (i == 0 && recv_len) {
- if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX)
- return -EPROTO;
- length += byte;
- /*
- * For block reads, generate txack here if data length
- * is 1 byte (total length is 2 bytes).
- */
- if (length == 2)
- writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
- | CCR_TXAK);
- }
- data[i] = byte;
+static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
+{
+ struct mpc_i2c *i2c = dev_id;
+ u8 status;
+
+ status = readb(i2c->base + MPC_I2C_SR);
+ if (status & CSR_MIF) {
+ writeb(0, i2c->base + MPC_I2C_SR);
+ mpc_i2c_do_intr(i2c, status);
+ return IRQ_HANDLED;
}
+ return IRQ_NONE;
+}
- return length;
+static int mpc_i2c_wait_for_completion(struct mpc_i2c *i2c)
+{
+ long time_left;
+
+ time_left = wait_event_timeout(i2c->waitq, !i2c->block, i2c->adap.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+ if (time_left < 0)
+ return time_left;
+
+ return 0;
}
-static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+static int mpc_i2c_execute_msg(struct mpc_i2c *i2c)
{
- struct i2c_msg *pmsg;
- int i;
- int ret = 0;
- unsigned long orig_jiffies = jiffies;
- struct mpc_i2c *i2c = i2c_get_adapdata(adap);
+ unsigned long orig_jiffies;
+ unsigned long flags;
+ int ret;
- mpc_i2c_start(i2c);
+ spin_lock_irqsave(&i2c->lock, flags);
- /* Allow bus up to 1s to become not busy */
- while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
- if (signal_pending(current)) {
- dev_dbg(i2c->dev, "Interrupted\n");
- writeccr(i2c, 0);
- return -EINTR;
- }
- if (time_after(jiffies, orig_jiffies + HZ)) {
- u8 status = readb(i2c->base + MPC_I2C_SR);
+ i2c->curr_msg = 0;
+ i2c->rc = 0;
+ i2c->byte_posn = 0;
+ i2c->block = 1;
+ i2c->action = MPC_I2C_ACTION_START;
- dev_dbg(i2c->dev, "timeout\n");
- if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
- writeb(status & ~CSR_MAL,
- i2c->base + MPC_I2C_SR);
- mpc_i2c_fixup(i2c);
- }
- return -EIO;
- }
- schedule();
- }
+ i2c->cntl_bits = CCR_MEN | CCR_MIEN;
+ writeb(0, i2c->base + MPC_I2C_SR);
+ writeccr(i2c, i2c->cntl_bits);
+
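+ /* Issue the first action (START) directly; the interrupt handler drives the rest */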
+ mpc_i2c_do_action(i2c);
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ ret = mpc_i2c_wait_for_completion(i2c);
+ if (ret)
+ i2c->rc = ret;
+
+ if (i2c->rc == -EIO || i2c->rc == -EAGAIN || i2c->rc == -ETIMEDOUT)
+ i2c_recover_bus(&i2c->adap);
- for (i = 0; ret >= 0 && i < num; i++) {
- pmsg = &msgs[i];
- dev_dbg(i2c->dev,
- "Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
- pmsg->len, pmsg->addr, i + 1, num);
- if (pmsg->flags & I2C_M_RD) {
- bool recv_len = pmsg->flags & I2C_M_RECV_LEN;
-
- ret = mpc_read(i2c, pmsg->addr, pmsg->buf, pmsg->len, i,
- recv_len);
- if (recv_len && ret > 0)
- pmsg->len = ret;
- } else {
- ret =
- mpc_write(i2c, pmsg->addr, pmsg->buf, pmsg->len, i);
- }
- }
- mpc_i2c_stop(i2c); /* Initiate STOP */
orig_jiffies = jiffies;
/* Wait until STOP is seen, allow up to 1 s */
while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
@@ -622,13 +623,41 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
writeb(status & ~CSR_MAL,
i2c->base + MPC_I2C_SR);
- mpc_i2c_fixup(i2c);
+ i2c_recover_bus(&i2c->adap);
}
return -EIO;
}
cond_resched();
}
- return (ret < 0) ? ret : num;
+
+ return i2c->rc;
+}
+
+static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ int rc, ret = num;
+ struct mpc_i2c *i2c = i2c_get_adapdata(adap);
+ int i;
+
+ dev_dbg(i2c->dev, "num = %d\n", num);
+ for (i = 0; i < num; i++)
+ dev_dbg(i2c->dev, " addr = %02x, flags = %02x, len = %d, %*ph\n",
+ msgs[i].addr, msgs[i].flags, msgs[i].len,
+ msgs[i].flags & I2C_M_RD ? 0 : msgs[i].len,
+ msgs[i].buf);
+
+ WARN_ON(i2c->msgs != NULL);
+ i2c->msgs = msgs;
+ i2c->num_msgs = num;
+
+ rc = mpc_i2c_execute_msg(i2c);
+ if (rc < 0)
+ ret = rc;
+
+ i2c->num_msgs = 0;
+ i2c->msgs = NULL;
+
+ return ret;
}
static u32 mpc_functionality(struct i2c_adapter *adap)
@@ -637,6 +666,15 @@ static u32 mpc_functionality(struct i2c_adapter *adap)
| I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
}
+static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
+{
+ struct mpc_i2c *i2c = i2c_get_adapdata(adap);
+
+ mpc_i2c_fixup(i2c);
+
+ return 0;
+}
+
static const struct i2c_algorithm mpc_algo = {
.master_xfer = mpc_xfer,
.functionality = mpc_functionality,
@@ -648,63 +686,61 @@ static struct i2c_adapter mpc_ops = {
.timeout = HZ,
};
-static const struct of_device_id mpc_i2c_of_match[];
+static struct i2c_bus_recovery_info fsl_i2c_recovery_info = {
+ .recover_bus = fsl_i2c_bus_recovery,
+};
+
static int fsl_i2c_probe(struct platform_device *op)
{
- const struct of_device_id *match;
+ const struct mpc_i2c_data *data;
struct mpc_i2c *i2c;
const u32 *prop;
u32 clock = MPC_I2C_CLOCK_LEGACY;
int result = 0;
int plen;
- struct resource res;
struct clk *clk;
int err;
- match = of_match_device(mpc_i2c_of_match, &op->dev);
- if (!match)
- return -EINVAL;
-
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ i2c = devm_kzalloc(&op->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
i2c->dev = &op->dev; /* for debug and error output */
- init_waitqueue_head(&i2c->queue);
+ init_waitqueue_head(&i2c->waitq);
+ spin_lock_init(&i2c->lock);
- i2c->base = of_iomap(op->dev.of_node, 0);
- if (!i2c->base) {
- dev_err(i2c->dev, "failed to map controller\n");
- result = -ENOMEM;
- goto fail_map;
- }
+ i2c->base = devm_platform_ioremap_resource(op, 0);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
- i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
- if (i2c->irq) { /* no i2c->irq implies polling */
- result = request_irq(i2c->irq, mpc_i2c_isr,
- IRQF_SHARED, "i2c-mpc", i2c);
- if (result < 0) {
- dev_err(i2c->dev, "failed to attach interrupt\n");
- goto fail_request;
- }
+ i2c->irq = platform_get_irq(op, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ result = devm_request_irq(&op->dev, i2c->irq, mpc_i2c_isr,
+ IRQF_SHARED, "i2c-mpc", i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ return result;
}
/*
* enable clock for the I2C peripheral (non fatal),
* keep a reference upon successful allocation
*/
- clk = devm_clk_get(&op->dev, NULL);
- if (!IS_ERR(clk)) {
- err = clk_prepare_enable(clk);
- if (err) {
- dev_err(&op->dev, "failed to enable clock\n");
- goto fail_request;
- } else {
- i2c->clk_per = clk;
- }
+ clk = devm_clk_get_optional(&op->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(&op->dev, "failed to enable clock\n");
+ return err;
}
+ i2c->clk_per = clk;
+
if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
@@ -714,8 +750,8 @@ static int fsl_i2c_probe(struct platform_device *op)
clock = *prop;
}
- if (match->data) {
- const struct mpc_i2c_data *data = match->data;
+ data = device_get_match_data(&op->dev);
+ if (data) {
data->setup(op->dev.of_node, i2c, clock);
} else {
/* Backwards compatibility */
@@ -731,31 +767,25 @@ static int fsl_i2c_probe(struct platform_device *op)
}
dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
- platform_set_drvdata(op, i2c);
-
i2c->adap = mpc_ops;
- of_address_to_resource(op->dev.of_node, 0, &res);
scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
- "MPC adapter at 0x%llx", (unsigned long long)res.start);
- i2c_set_adapdata(&i2c->adap, i2c);
+ "MPC adapter (%s)", of_node_full_name(op->dev.of_node));
i2c->adap.dev.parent = &op->dev;
+ i2c->adap.nr = op->id;
i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
+ i2c->adap.bus_recovery_info = &fsl_i2c_recovery_info;
+ platform_set_drvdata(op, i2c);
+ i2c_set_adapdata(&i2c->adap, i2c);
- result = i2c_add_adapter(&i2c->adap);
- if (result < 0)
+ result = i2c_add_numbered_adapter(&i2c->adap);
+ if (result)
goto fail_add;
- return result;
+ return 0;
fail_add:
- if (i2c->clk_per)
- clk_disable_unprepare(i2c->clk_per);
- free_irq(i2c->irq, i2c);
- fail_request:
- irq_dispose_mapping(i2c->irq);
- iounmap(i2c->base);
- fail_map:
- kfree(i2c);
+ clk_disable_unprepare(i2c->clk_per);
+
return result;
};
@@ -765,20 +795,12 @@ static int fsl_i2c_remove(struct platform_device *op)
i2c_del_adapter(&i2c->adap);
- if (i2c->clk_per)
- clk_disable_unprepare(i2c->clk_per);
+ clk_disable_unprepare(i2c->clk_per);
- if (i2c->irq)
- free_irq(i2c->irq, i2c);
-
- irq_dispose_mapping(i2c->irq);
- iounmap(i2c->base);
- kfree(i2c);
return 0;
};
-#ifdef CONFIG_PM_SLEEP
-static int mpc_i2c_suspend(struct device *dev)
+static int __maybe_unused mpc_i2c_suspend(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
@@ -788,7 +810,7 @@ static int mpc_i2c_suspend(struct device *dev)
return 0;
}
-static int mpc_i2c_resume(struct device *dev)
+static int __maybe_unused mpc_i2c_resume(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
@@ -797,12 +819,7 @@ static int mpc_i2c_resume(struct device *dev)
return 0;
}
-
static SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
-#define MPC_I2C_PM_OPS (&mpc_i2c_pm_ops)
-#else
-#define MPC_I2C_PM_OPS NULL
-#endif
static const struct mpc_i2c_data mpc_i2c_data_512x = {
.setup = mpc_i2c_setup_512x,
@@ -845,7 +862,7 @@ static struct platform_driver mpc_i2c_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
- .pm = MPC_I2C_PM_OPS,
+ .pm = &mpc_i2c_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 2ffd2f354d0a..5ddfa4e56ee2 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -231,6 +231,7 @@ struct mtk_i2c {
struct i2c_adapter adap; /* i2c host adapter */
struct device *dev;
struct completion msg_complete;
+ struct i2c_timings timing_info;
/* set in i2c probe */
void __iomem *base; /* i2c base addr */
@@ -479,7 +480,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- if (i2c->dev_comp->dma_sync) {
+ if (i2c->dev_comp->apdma_sync) {
writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
udelay(10);
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
@@ -564,7 +565,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
static int mtk_i2c_max_step_cnt(unsigned int target_speed)
{
- if (target_speed > I2C_MAX_FAST_MODE_FREQ)
+ if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
return MAX_HS_STEP_CNT_DIV;
else
return MAX_STEP_CNT_DIV;
@@ -607,7 +608,8 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
else
clk_ns = sample_ns / 2;
- su_sta_cnt = DIV_ROUND_UP(spec->min_su_sta_ns, clk_ns);
+ su_sta_cnt = DIV_ROUND_UP(spec->min_su_sta_ns +
+ i2c->timing_info.scl_int_delay_ns, clk_ns);
if (su_sta_cnt > max_sta_cnt)
return -1;
@@ -635,7 +637,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
if (sda_min > sda_max)
return -3;
- if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
+ if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
if (i2c->dev_comp->ltiming_adjust) {
i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
(sample_cnt << 12) | (high_cnt << 8);
@@ -850,7 +852,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
- if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
+ if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;
if (i2c->op == I2C_MASTER_WRRD)
@@ -1067,7 +1069,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
}
}
- if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
+ if (i2c->auto_restart && num >= 2 &&
+ i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
/* ignore the first restart irq after the master code,
* otherwise the first transfer will be discarded.
*/
@@ -1175,6 +1178,8 @@ static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c)
i2c->use_push_pull =
of_property_read_bool(np, "mediatek,use-push-pull");
+ i2c_parse_fw_timings(i2c->dev, &i2c->timing_info, true);
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a3363b20f168..dc77e1c4e80f 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -277,7 +277,7 @@ static int init_hw(struct nmk_i2c_dev *dev)
goto exit;
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR , I2C_CR_PE);
+ i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
disable_all_interrupts(dev);
@@ -525,7 +525,7 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
dev->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR , I2C_CR_PE);
+ i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
init_completion(&dev->xfer_complete);
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 6b20601ffb13..b5055a3cbd93 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -262,6 +262,10 @@ static const struct property_entry ccgx_props[] = {
{ }
};
+static const struct software_node ccgx_node = {
+ .properties = ccgx_props,
+};
+
static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
{
i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
@@ -274,7 +278,7 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
sizeof(i2cd->gpu_ccgx_ucsi->type));
i2cd->gpu_ccgx_ucsi->addr = 0x8;
i2cd->gpu_ccgx_ucsi->irq = irq;
- i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
+ i2cd->gpu_ccgx_ucsi->swnode = &ccgx_node;
i2cd->ccgx_client = i2c_new_client_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
return PTR_ERR_OR_ZERO(i2cd->ccgx_client);
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 12ac4212aded..d4f6c6d60683 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(omap->dev);
- r = pm_runtime_get_sync(omap->dev);
+ r = pm_runtime_resume_and_get(omap->dev);
if (r < 0)
- goto err_free_mem;
+ goto err_disable_pm;
/*
* Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
@@ -1513,8 +1513,8 @@ err_unuse_clocks:
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_put_sync(omap->dev);
+err_disable_pm:
pm_runtime_disable(&pdev->dev);
-err_free_mem:
return r;
}
@@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
int ret;
i2c_del_adapter(&omap->adapter);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 3e38e114948b..5241e6f414e9 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -76,11 +76,6 @@ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap,
* but I think the current API makes no sense and I don't want
* any driver that I haven't verified for correctness to go
* anywhere near a pmac i2c bus anyway ...
- *
- * I'm also not completely sure what kind of phases to do between
- * the actual command and the data (what I am _supposed_ to do that
- * is). For now, I assume writes are a single stream and reads have
- * a repeat start/addr phase (but not stop in between)
*/
case I2C_SMBUS_BLOCK_DATA:
buf = data->block;
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 1c259b5188de..c63d5545fc2a 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -569,9 +569,9 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].mode = I2C_MODE_STANDARD;
ret = of_property_read_u32(child, "clock-frequency", &val);
if (!ret) {
- if (val == 400000)
+ if (val == I2C_MAX_FAST_MODE_FREQ)
cci->master[idx].mode = I2C_MODE_FAST;
- else if (val == 1000000)
+ else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
cci->master[idx].mode = I2C_MODE_FAST_PLUS;
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 12f6d452c0f7..327c092a4130 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -141,6 +141,7 @@ struct rcar_i2c_priv {
enum dma_data_direction dma_direction;
struct reset_control *rstc;
+ bool atomic_xfer;
int irq;
struct i2c_client *host_notify_client;
@@ -353,7 +354,9 @@ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
rcar_i2c_write(priv, ICMSR, 0);
}
- rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
+
+ if (!priv->atomic_xfer)
+ rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
}
static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
@@ -418,7 +421,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
int len;
/* Do various checks to see if DMA is feasible at all */
- if (IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
+ if (priv->atomic_xfer || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
!(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA))
return false;
@@ -646,7 +649,8 @@ static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
/* Nack */
if (msr & MNR) {
/* HW automatically sends STOP after received NACK */
- rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
+ if (!priv->atomic_xfer)
+ rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
priv->flags |= ID_NACK;
goto out;
}
@@ -667,7 +671,8 @@ out:
if (priv->flags & ID_DONE) {
rcar_i2c_write(priv, ICMIER, 0);
rcar_i2c_write(priv, ICMSR, 0);
- wake_up(&priv->wait);
+ if (!priv->atomic_xfer)
+ wake_up(&priv->wait);
}
return IRQ_HANDLED;
@@ -684,7 +689,8 @@ static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
/* Only handle interrupts that are currently enabled */
msr = rcar_i2c_read(priv, ICMSR);
- msr &= rcar_i2c_read(priv, ICMIER);
+ if (!priv->atomic_xfer)
+ msr &= rcar_i2c_read(priv, ICMIER);
return rcar_i2c_irq(irq, priv, msr);
}
@@ -696,7 +702,8 @@ static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
/* Only handle interrupts that are currently enabled */
msr = rcar_i2c_read(priv, ICMSR);
- msr &= rcar_i2c_read(priv, ICMIER);
+ if (!priv->atomic_xfer)
+ msr &= rcar_i2c_read(priv, ICMIER);
/*
* Clear START or STOP immediately, except for REPSTART after read or
@@ -804,6 +811,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
int i, ret;
long time_left;
+ priv->atomic_xfer = false;
+
pm_runtime_get_sync(dev);
/* Check bus state before init otherwise bus busy info will be lost */
@@ -858,6 +867,68 @@ out:
return ret;
}
+static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
+ struct device *dev = rcar_i2c_priv_to_dev(priv);
+ unsigned long j;
+ bool time_left;
+ int ret;
+
+ priv->atomic_xfer = true;
+
+ pm_runtime_get_sync(dev);
+
+ /* Check bus state before init otherwise bus busy info will be lost */
+ ret = rcar_i2c_bus_barrier(priv);
+ if (ret < 0)
+ goto out;
+
+ rcar_i2c_init(priv);
+
+ /* init first message */
+ priv->msg = msgs;
+ priv->msgs_left = num;
+ priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
+ rcar_i2c_prepare_msg(priv);
+
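+ /* No interrupts in atomic mode: poll the status register until the transfer completes or times out */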
+ j = jiffies + num * adap->timeout;
+ do {
+ u32 msr = rcar_i2c_read(priv, ICMSR);
+
+ msr &= (rcar_i2c_is_recv(priv) ? RCAR_IRQ_RECV : RCAR_IRQ_SEND) | RCAR_IRQ_STOP;
+
+ if (msr) {
+ if (priv->devtype < I2C_RCAR_GEN3)
+ rcar_i2c_gen2_irq(0, priv);
+ else
+ rcar_i2c_gen3_irq(0, priv);
+ }
+
+ time_left = time_before_eq(jiffies, j);
+ } while (!(priv->flags & ID_DONE) && time_left);
+
+ if (!time_left) {
+ rcar_i2c_init(priv);
+ ret = -ETIMEDOUT;
+ } else if (priv->flags & ID_NACK) {
+ ret = -ENXIO;
+ } else if (priv->flags & ID_ARBLOST) {
+ ret = -EAGAIN;
+ } else {
+ ret = num - priv->msgs_left; /* The number of transfers done */
+ }
+out:
+ pm_runtime_put(dev);
+
+ if (ret < 0 && ret != -ENXIO)
+ dev_err(dev, "error %d : %x\n", ret, priv->flags);
+
+ return ret;
+}
+
static int rcar_reg_slave(struct i2c_client *slave)
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
@@ -922,6 +993,7 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap)
static const struct i2c_algorithm rcar_i2c_algo = {
.master_xfer = rcar_i2c_master_xfer,
+ .master_xfer_atomic = rcar_i2c_master_xfer_atomic,
.functionality = rcar_i2c_func,
.reg_slave = rcar_reg_slave,
.unreg_slave = rcar_unreg_slave,
@@ -1027,7 +1099,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
- priv->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto out_pm_disable;
+ priv->irq = ret;
ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 62a903fbe912..ab928613afba 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
@@ -156,12 +157,8 @@ MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
*/
static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *pdev)
{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
- return (kernel_ulong_t)match->data;
- }
+ if (pdev->dev.of_node)
+ return (kernel_ulong_t)of_device_get_match_data(&pdev->dev);
return platform_get_device_id(pdev)->driver_data;
}
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 1dc387392e74..6746aa46d96c 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -18,8 +18,6 @@
/* SMBUS HID definition as supported by Microsoft Windows */
#define ACPI_SMBUS_MS_HID "SMB0001"
-ACPI_MODULE_NAME("smbus_cmi");
-
struct smbus_methods_t {
char *mt_info;
char *mt_sbr;
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index c2005c789d2b..319d1fa617c8 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
goto out2;
}
- id->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto out3;
+ id->irq = ret;
id->adap.nr = pdev->id;
id->adap.algo = &sh7760_i2c_algo;
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 2917fecf6c80..4fe15cd78907 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
int im, ret;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -640,6 +640,7 @@ static const struct of_device_id sprd_i2c_of_match[] = {
{ .compatible = "sprd,sc9860-i2c", },
{},
};
+MODULE_DEVICE_TABLE(of, sprd_i2c_of_match);
static struct platform_driver sprd_i2c_driver = {
.probe = sprd_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index c62c815b88eb..0138317ea600 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -164,7 +164,6 @@ enum {
#define STM32F7_I2C_DNF_DEFAULT 0
#define STM32F7_I2C_DNF_MAX 15
-#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */
@@ -223,8 +222,6 @@ struct stm32f7_i2c_spec {
* @clock_src: I2C clock source frequency (Hz)
* @rise_time: Rise time (ns)
* @fall_time: Fall time (ns)
- * @dnf: Digital filter coefficient (0-16)
- * @analog_filter: Analog filter delay (On/Off)
* @fmp_clr_offset: Fast Mode Plus clear register offset from set register
*/
struct stm32f7_i2c_setup {
@@ -232,8 +229,6 @@ struct stm32f7_i2c_setup {
u32 clock_src;
u32 rise_time;
u32 fall_time;
- u8 dnf;
- bool analog_filter;
u32 fmp_clr_offset;
};
@@ -312,6 +307,9 @@ struct stm32f7_i2c_msg {
* @wakeup_src: boolean to know if the device is a wakeup source
* @smbus_mode: states that the controller is configured in SMBus mode
* @host_notify_client: SMBus host-notify client
+ * @analog_filter: boolean to indicate enabling of the analog filter
+ * @dnf_dt: value of digital filter requested via dt
+ * @dnf: value of digital filter to apply
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -340,6 +338,9 @@ struct stm32f7_i2c_dev {
bool wakeup_src;
bool smbus_mode;
struct i2c_client *host_notify_client;
+ bool analog_filter;
+ u32 dnf_dt;
+ u32 dnf;
};
/*
@@ -385,15 +386,11 @@ static struct stm32f7_i2c_spec stm32f7_i2c_specs[] = {
static const struct stm32f7_i2c_setup stm32f7_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
- .dnf = STM32F7_I2C_DNF_DEFAULT,
- .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
};
static const struct stm32f7_i2c_setup stm32mp15_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
- .dnf = STM32F7_I2C_DNF_DEFAULT,
- .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
.fmp_clr_offset = 0x40,
};
@@ -462,27 +459,28 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
return -EINVAL;
}
- if (setup->dnf > STM32F7_I2C_DNF_MAX) {
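+ /* Convert the requested digital filter width (ns) into I2C clock cycles */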
+ i2c_dev->dnf = DIV_ROUND_CLOSEST(i2c_dev->dnf_dt, i2cclk);
+ if (i2c_dev->dnf > STM32F7_I2C_DNF_MAX) {
dev_err(i2c_dev->dev,
"DNF out of bound %d/%d\n",
- setup->dnf, STM32F7_I2C_DNF_MAX);
+ i2c_dev->dnf * i2cclk, STM32F7_I2C_DNF_MAX * i2cclk);
return -EINVAL;
}
/* Analog and Digital Filters */
af_delay_min =
- (setup->analog_filter ?
+ (i2c_dev->analog_filter ?
STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0);
af_delay_max =
- (setup->analog_filter ?
+ (i2c_dev->analog_filter ?
STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
- dnf_delay = setup->dnf * i2cclk;
+ dnf_delay = i2c_dev->dnf * i2cclk;
sdadel_min = specs->hddat_min + setup->fall_time -
- af_delay_min - (setup->dnf + 3) * i2cclk;
+ af_delay_min - (i2c_dev->dnf + 3) * i2cclk;
sdadel_max = specs->vddat_max - setup->rise_time -
- af_delay_max - (setup->dnf + 4) * i2cclk;
+ af_delay_max - (i2c_dev->dnf + 4) * i2cclk;
scldel_min = setup->rise_time + specs->sudat_min;
@@ -648,6 +646,7 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
setup->speed_freq = t->bus_freq_hz;
i2c_dev->setup.rise_time = t->scl_rise_ns;
i2c_dev->setup.fall_time = t->scl_fall_ns;
+ i2c_dev->dnf_dt = t->digital_filter_width_ns;
setup->clock_src = clk_get_rate(i2c_dev->clk);
if (!setup->clock_src) {
@@ -655,6 +654,9 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
return -EINVAL;
}
+ if (!of_property_read_bool(i2c_dev->dev->of_node, "i2c-digital-filter"))
+ i2c_dev->dnf_dt = STM32F7_I2C_DNF_DEFAULT;
+
do {
ret = stm32f7_i2c_compute_timing(i2c_dev, setup,
&i2c_dev->timing);
@@ -676,12 +678,15 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
return ret;
}
+ i2c_dev->analog_filter = of_property_read_bool(i2c_dev->dev->of_node,
+ "i2c-analog-filter");
+
dev_dbg(i2c_dev->dev, "I2C Speed(%i), Clk Source(%i)\n",
setup->speed_freq, setup->clock_src);
dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n",
setup->rise_time, setup->fall_time);
dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n",
- (setup->analog_filter ? "On" : "Off"), setup->dnf);
+ (i2c_dev->analog_filter ? "On" : "Off"), i2c_dev->dnf);
i2c_dev->bus_rate = setup->speed_freq;
@@ -720,8 +725,8 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll);
writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
- /* Enable I2C */
- if (i2c_dev->setup.analog_filter)
+ /* Configure the Analog Filter */
+ if (i2c_dev->analog_filter)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
else
@@ -732,7 +737,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_DNF_MASK);
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
- STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
+ STM32F7_I2C_CR1_DNF(i2c_dev->dnf));
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
@@ -1597,7 +1602,8 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
/* Bus error */
if (status & STM32F7_I2C_ISR_BERR) {
- dev_err(dev, "<%s>: Bus error\n", __func__);
+ dev_err(dev, "<%s>: Bus error accessing addr 0x%x\n",
+ __func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
stm32f7_i2c_release_bus(&i2c_dev->adap);
f7_msg->result = -EIO;
@@ -1605,13 +1611,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
/* Arbitration loss */
if (status & STM32F7_I2C_ISR_ARLO) {
- dev_dbg(dev, "<%s>: Arbitration loss\n", __func__);
+ dev_dbg(dev, "<%s>: Arbitration loss accessing addr 0x%x\n",
+ __func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
f7_msg->result = -EAGAIN;
}
if (status & STM32F7_I2C_ISR_PECERR) {
- dev_err(dev, "<%s>: PEC error in reception\n", __func__);
+ dev_err(dev, "<%s>: PEC error in reception accessing addr 0x%x\n",
+ __func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
f7_msg->result = -EINVAL;
}
@@ -1652,7 +1660,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
i2c_dev->msg_id = 0;
f7_msg->smbus = false;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -1698,7 +1706,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
f7_msg->read_write = read_write;
f7_msg->smbus = true;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1799,7 +1807,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
if (ret)
return ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -1880,7 +1888,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
WARN_ON(!i2c_dev->slave[id]);
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -2027,12 +2035,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
phy_addr = (dma_addr_t)res->start;
irq_event = platform_get_irq(pdev, 0);
- if (irq_event <= 0) {
- if (irq_event != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get IRQ event: %d\n",
- irq_event);
+ if (irq_event <= 0)
return irq_event ? : -ENOENT;
- }
irq_error = platform_get_irq(pdev, 1);
if (irq_error <= 0)
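
The `irq_event ? : -ENOENT` form above is the GNU "Elvis" extension: `x ? : y` yields x when x is non-zero and y otherwise, evaluating x only once. Spelled out:

/* Equivalent of the GNU binary conditional used above */
return irq_event ? irq_event : -ENOENT;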
@@ -2267,13 +2271,12 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+static int __maybe_unused stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
{
int ret;
struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -2289,13 +2292,13 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
return ret;
}
-static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+static int __maybe_unused stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
{
u32 cr1;
int ret;
struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
- ret = pm_runtime_get_sync(i2c_dev->dev);
+ ret = pm_runtime_resume_and_get(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -2320,7 +2323,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
return ret;
}
-static int stm32f7_i2c_suspend(struct device *dev)
+static int __maybe_unused stm32f7_i2c_suspend(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
@@ -2341,7 +2344,7 @@ static int stm32f7_i2c_suspend(struct device *dev)
return 0;
}
-static int stm32f7_i2c_resume(struct device *dev)
+static int __maybe_unused stm32f7_i2c_resume(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
@@ -2361,7 +2364,6 @@ static int stm32f7_i2c_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
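
The #ifdef CONFIG_PM_SLEEP fence is dropped in favour of __maybe_unused, the usual pattern for PM callbacks; a minimal sketch with hypothetical names:

/* The SET_*_PM_OPS() macros compile the references away when the
 * config option is off, and __maybe_unused silences the resulting
 * defined-but-unused warning, so both configurations get build
 * coverage.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* hypothetical no-op */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, NULL)
};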
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index c0c7d01473f2..3680d608698b 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -38,49 +38,31 @@ struct tegra_bpmp_i2c {
* firmware I2C driver to avoid any issues in future if Linux I2C flags are
* changed.
*/
-static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
+static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
{
- if (flags & I2C_M_TEN) {
+ if (flags & I2C_M_TEN)
*out |= SERIALI2C_TEN;
- flags &= ~I2C_M_TEN;
- }
- if (flags & I2C_M_RD) {
+ if (flags & I2C_M_RD)
*out |= SERIALI2C_RD;
- flags &= ~I2C_M_RD;
- }
- if (flags & I2C_M_STOP) {
+ if (flags & I2C_M_STOP)
*out |= SERIALI2C_STOP;
- flags &= ~I2C_M_STOP;
- }
- if (flags & I2C_M_NOSTART) {
+ if (flags & I2C_M_NOSTART)
*out |= SERIALI2C_NOSTART;
- flags &= ~I2C_M_NOSTART;
- }
- if (flags & I2C_M_REV_DIR_ADDR) {
+ if (flags & I2C_M_REV_DIR_ADDR)
*out |= SERIALI2C_REV_DIR_ADDR;
- flags &= ~I2C_M_REV_DIR_ADDR;
- }
- if (flags & I2C_M_IGNORE_NAK) {
+ if (flags & I2C_M_IGNORE_NAK)
*out |= SERIALI2C_IGNORE_NAK;
- flags &= ~I2C_M_IGNORE_NAK;
- }
- if (flags & I2C_M_NO_RD_ACK) {
+ if (flags & I2C_M_NO_RD_ACK)
*out |= SERIALI2C_NO_RD_ACK;
- flags &= ~I2C_M_NO_RD_ACK;
- }
- if (flags & I2C_M_RECV_LEN) {
+ if (flags & I2C_M_RECV_LEN)
*out |= SERIALI2C_RECV_LEN;
- flags &= ~I2C_M_RECV_LEN;
- }
-
- return 0;
}
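
Now that the function cannot fail, the same mapping could also be written table-driven; an equivalent hypothetical sketch (not the code the driver ships):

static void tegra_bpmp_xlate_flags_tbl(u16 flags, u16 *out)
{
	static const struct { u16 i2c; u16 bpmp; } map[] = {
		{ I2C_M_TEN,          SERIALI2C_TEN },
		{ I2C_M_RD,           SERIALI2C_RD },
		{ I2C_M_STOP,         SERIALI2C_STOP },
		{ I2C_M_NOSTART,      SERIALI2C_NOSTART },
		{ I2C_M_REV_DIR_ADDR, SERIALI2C_REV_DIR_ADDR },
		{ I2C_M_IGNORE_NAK,   SERIALI2C_IGNORE_NAK },
		{ I2C_M_NO_RD_ACK,    SERIALI2C_NO_RD_ACK },
		{ I2C_M_RECV_LEN,     SERIALI2C_RECV_LEN },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (flags & map[i].i2c)
			*out |= map[i].bpmp;
}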
/**
@@ -97,22 +79,19 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
*
* See deserialize_i2c documentation for the data format in the other direction.
*/
-static int tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
+static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
struct mrq_i2c_request *request,
struct i2c_msg *msgs,
unsigned int num)
{
char *buf = request->xfer.data_buf;
unsigned int i, j, pos = 0;
- int err;
for (i = 0; i < num; i++) {
struct i2c_msg *msg = &msgs[i];
u16 flags = 0;
- err = tegra_bpmp_xlate_flags(msg->flags, &flags);
- if (err < 0)
- return err;
+ tegra_bpmp_xlate_flags(msg->flags, &flags);
buf[pos++] = msg->addr & 0xff;
buf[pos++] = (msg->addr & 0xff00) >> 8;
@@ -128,8 +107,6 @@ static int tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
}
request->xfer.data_size = pos;
-
- return 0;
}
/**
@@ -217,7 +194,32 @@ static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
else
err = tegra_bpmp_transfer(i2c->bpmp, &msg);
- return err;
+ if (err < 0) {
+ dev_err(i2c->dev, "failed to transfer message: %d\n", err);
+ return err;
+ }
+
+ if (msg.rx.ret != 0) {
+ if (msg.rx.ret == -BPMP_EAGAIN) {
+ dev_dbg(i2c->dev, "arbitration lost\n");
+ return -EAGAIN;
+ }
+
+ if (msg.rx.ret == -BPMP_ETIMEDOUT) {
+ dev_dbg(i2c->dev, "timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (msg.rx.ret == -BPMP_ENXIO) {
+ dev_dbg(i2c->dev, "NAK\n");
+ return -ENXIO;
+ }
+
+ dev_err(i2c->dev, "transaction failed: %d\n", msg.rx.ret);
+ return -EIO;
+ }
+
+ return 0;
}
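
The added block folds the firmware's private error space into standard errnos; condensed, the translation is:

/* Condensed view of the msg.rx.ret translation above */
switch (msg.rx.ret) {
case 0:			return 0;
case -BPMP_EAGAIN:	return -EAGAIN;		/* arbitration lost */
case -BPMP_ETIMEDOUT:	return -ETIMEDOUT;	/* bus timeout */
case -BPMP_ENXIO:	return -ENXIO;		/* NAK */
default:		return -EIO;		/* opaque failure */
}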
static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter,
@@ -238,12 +240,7 @@ static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter,
memset(&request, 0, sizeof(request));
memset(&response, 0, sizeof(response));
- err = tegra_bpmp_serialize_i2c_msg(i2c, &request, msgs, num);
- if (err < 0) {
- dev_err(i2c->dev, "failed to serialize message: %d\n", err);
- return err;
- }
-
+ tegra_bpmp_serialize_i2c_msg(i2c, &request, msgs, num);
err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response, atomic);
if (err < 0) {
dev_err(i2c->dev, "failed to transfer message: %d\n", err);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 63cbb9c7c1b0..bba08cbce6e1 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/version.h>
#define MAILBOX_OP_TIMEOUT 1000 /* Operation time out in ms */
#define MAILBOX_I2C_INDEX 0
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 087b2951942e..2a8568b97c14 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
- err = pm_runtime_get_sync(i2c->dev);
+ err = pm_runtime_resume_and_get(i2c->dev);
if (err < 0)
return err;
@@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- ret = pm_runtime_get_sync(i2c->dev);
+ ret = pm_runtime_resume_and_get(i2c->dev);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 8bc51d4e69df..4df8ad092df3 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -47,7 +47,6 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
*
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
- * Device properties are deep-copied though.
*/
int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
@@ -72,16 +71,6 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
devinfo->busnum = busnum;
devinfo->board_info = *info;
- if (info->properties) {
- devinfo->board_info.properties =
- property_entries_dup(info->properties);
- if (IS_ERR(devinfo->board_info.properties)) {
- status = PTR_ERR(devinfo->board_info.properties);
- kfree(devinfo);
- break;
- }
- }
-
if (info->resources) {
devinfo->board_info.resources =
kmemdup(info->resources,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f21362355973..5a97e4a02fa2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -76,6 +76,27 @@ void i2c_transfer_trace_unreg(void)
static_branch_dec(&i2c_trace_msg_key);
}
+const char *i2c_freq_mode_string(u32 bus_freq_hz)
+{
+ switch (bus_freq_hz) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ return "Standard Mode (100 kHz)";
+ case I2C_MAX_FAST_MODE_FREQ:
+ return "Fast Mode (400 kHz)";
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ return "Fast Mode Plus (1.0 MHz)";
+ case I2C_MAX_TURBO_MODE_FREQ:
+ return "Turbo Mode (1.4 MHz)";
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
+ return "High Speed Mode (3.4 MHz)";
+ case I2C_MAX_ULTRA_FAST_MODE_FREQ:
+ return "Ultra Fast Mode (5.0 MHz)";
+ default:
+ return "Unknown Mode";
+ }
+}
+EXPORT_SYMBOL_GPL(i2c_freq_mode_string);
+
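
A typical consumer of the new helper is an adapter driver logging its configured rate at probe; a hypothetical use (t assumed to be the driver's struct i2c_timings after i2c_parse_fw_timings()):

dev_info(dev, "bus frequency %u Hz (%s)\n",
	 t->bus_freq_hz, i2c_freq_mode_string(t->bus_freq_hz));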
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client)
{
@@ -249,7 +270,7 @@ EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
int i2c_recover_bus(struct i2c_adapter *adap)
{
if (!adap->bus_recovery_info)
- return -EOPNOTSUPP;
+ return -EBUSY;
dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
return adap->bus_recovery_info->recover_bus(adap);
@@ -519,6 +540,13 @@ static int i2c_device_probe(struct device *dev)
if (status)
goto err_clear_wakeup_irq;
+ client->devres_group_id = devres_open_group(&client->dev, NULL,
+ GFP_KERNEL);
+ if (!client->devres_group_id) {
+ status = -ENOMEM;
+ goto err_detach_pm_domain;
+ }
+
/*
* When there are no more users of probe(),
* rename probe_new to probe.
@@ -531,11 +559,21 @@ static int i2c_device_probe(struct device *dev)
else
status = -EINVAL;
+ /*
+ * Note that we are not closing the devres group opened above, so
+ * even resources that were attached to the device after probe has
+ * run are released when i2c_device_remove() is executed. This is
+ * needed because some drivers allocate additional resources later,
+ * for example when updating firmware.
+ */
+
if (status)
- goto err_detach_pm_domain;
+ goto err_release_driver_resources;
return 0;
+err_release_driver_resources:
+ devres_release_group(&client->dev, client->devres_group_id);
err_detach_pm_domain:
dev_pm_domain_detach(&client->dev, true);
err_clear_wakeup_irq:
@@ -564,6 +602,8 @@ static int i2c_device_remove(struct device *dev)
dev_warn(dev, "remove failed (%pe), will be ignored\n", ERR_PTR(status));
}
+ devres_release_group(&client->dev, client->devres_group_id);
+
dev_pm_domain_detach(&client->dev, true);
dev_pm_clear_wake_irq(&client->dev);
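
The effect on client drivers: any devm_* allocation made against client->dev, even long after probe, now lands in the group and is freed at unbind. A hypothetical late allocation that benefits:

/* Hypothetical sysfs handler; the buffer is caught by the devres
 * group opened in i2c_device_probe() and released from
 * i2c_device_remove() via devres_release_group().
 */
static ssize_t update_fw_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	u8 *scratch = devm_kzalloc(dev, SZ_4K, GFP_KERNEL);

	if (!scratch)
		return -ENOMEM;
	/* ... stage firmware through scratch ... */
	return count;
}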
@@ -612,7 +652,7 @@ modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
if (len != -ENODEV)
return len;
- len = acpi_device_modalias(dev, buf, PAGE_SIZE -1);
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
@@ -910,11 +950,11 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
i2c_dev_set_name(adap, client, info);
- if (info->properties) {
- status = device_add_properties(&client->dev, info->properties);
+ if (info->swnode) {
+ status = device_add_software_node(&client->dev, info->swnode);
if (status) {
dev_err(&adap->dev,
- "Failed to add properties to client %s: %d\n",
+ "Failed to add software node to client %s: %d\n",
client->name, status);
goto out_err_put_of_node;
}
@@ -922,16 +962,15 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
status = device_register(&client->dev);
if (status)
- goto out_free_props;
+ goto out_remove_swnode;
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
return client;
-out_free_props:
- if (info->properties)
- device_remove_properties(&client->dev);
+out_remove_swnode:
+ device_remove_software_node(&client->dev);
out_err_put_of_node:
of_node_put(info->of_node);
out_err:
@@ -961,6 +1000,7 @@ void i2c_unregister_device(struct i2c_client *client)
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+ device_remove_software_node(&client->dev);
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
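
Callers migrate from the removed .properties pointer to a software node; a hypothetical board-file conversion:

static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	{ }
};

static const struct software_node foo_swnode = {
	.properties = foo_props,
};

static const struct i2c_board_info foo_info = {
	I2C_BOARD_INFO("foo", 0x2d),	/* hypothetical chip at 0x2d */
	.swnode = &foo_swnode,
};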
@@ -1017,15 +1057,9 @@ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address
}
EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
-struct i2c_dummy_devres {
- struct i2c_client *client;
-};
-
-static void devm_i2c_release_dummy(struct device *dev, void *res)
+static void devm_i2c_release_dummy(void *client)
{
- struct i2c_dummy_devres *this = res;
-
- i2c_unregister_device(this->client);
+ i2c_unregister_device(client);
}
/**
@@ -1042,20 +1076,16 @@ struct i2c_client *devm_i2c_new_dummy_device(struct device *dev,
struct i2c_adapter *adapter,
u16 address)
{
- struct i2c_dummy_devres *dr;
struct i2c_client *client;
-
- dr = devres_alloc(devm_i2c_release_dummy, sizeof(*dr), GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
client = i2c_new_dummy_device(adapter, address);
- if (IS_ERR(client)) {
- devres_free(dr);
- } else {
- dr->client = client;
- devres_add(dev, dr);
- }
+ if (IS_ERR(client))
+ return client;
+
+ ret = devm_add_action_or_reset(dev, devm_i2c_release_dummy, client);
+ if (ret)
+ return ERR_PTR(ret);
return client;
}
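
The conversion replaces the open-coded devres record with devm_add_action_or_reset(); the general shape of that pattern, with hypothetical create_foo()/destroy_foo():

static void destroy_foo_action(void *data)
{
	destroy_foo(data);
}

	struct foo *f = create_foo(dev);
	int ret;

	if (IS_ERR(f))
		return PTR_ERR(f);

	/* Runs on driver detach, or immediately if registration itself
	 * fails, so no error path has to clean up by hand. */
	ret = devm_add_action_or_reset(dev, destroy_foo_action, f);
	if (ret)
		return ret;	/* destroy_foo_action(f) already ran */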
@@ -1704,6 +1734,32 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL(i2c_del_adapter);
+static void devm_i2c_del_adapter(void *adapter)
+{
+ i2c_del_adapter(adapter);
+}
+
+/**
+ * devm_i2c_add_adapter - device-managed variant of i2c_add_adapter()
+ * @dev: managing device for adding this I2C adapter
+ * @adapter: the adapter to add
+ * Context: can sleep
+ *
+ * Add an adapter with a dynamic bus number, same as i2c_add_adapter(),
+ * but the adapter will be automatically deleted on driver detach.
+ */
+int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter)
+{
+ int ret;
+
+ ret = i2c_add_adapter(adapter);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_i2c_del_adapter, adapter);
+}
+EXPORT_SYMBOL_GPL(devm_i2c_add_adapter);
+
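
With the managed variant a bus driver can drop its .remove-time i2c_del_adapter() call entirely; a hypothetical probe:

static int foo_i2c_probe(struct platform_device *pdev)
{
	struct foo_i2c *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* ... set up priv->adap: .algo, .dev.parent, name ... */

	/* adapter is deleted automatically on unbind */
	return devm_i2c_add_adapter(&pdev->dev, &priv->adap);
}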
static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p,
u32 def_val, bool use_def)
{
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6ceb11cc4be1..6ef38a8ee95c 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(rdwr_arg)))
return -EFAULT;
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
+ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+ return -EINVAL;
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
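
From userspace the checked fields arrive through the I2C_RDWR ioctl; a hedged sketch (needs <linux/i2c.h>, <linux/i2c-dev.h> and <sys/ioctl.h>; fd open on some /dev/i2c-N; 0x50 a hypothetical target):

__u8 byte;
struct i2c_msg msg = {
	.addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &byte,
};
struct i2c_rdwr_ioctl_data rdwr = { .msgs = &msg, .nmsgs = 1 };

/* A NULL .msgs or .nmsgs == 0 now fails with -EINVAL up front */
if (ioctl(fd, I2C_RDWR, &rdwr) < 0)
	perror("I2C_RDWR");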
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 5c9fac7cf420..3b0991fedd81 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -121,7 +121,7 @@ struct ib_gid_table {
u32 default_gid_indices;
};
-static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
+static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
struct ib_event event;
@@ -197,7 +197,7 @@ int ib_cache_gid_parse_type_str(const char *buf)
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
-static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
return device->port_data[port].cache.gid;
}
@@ -237,10 +237,10 @@ static void put_gid_ndev(struct rcu_head *head)
static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
struct ib_device *device = entry->attr.device;
- u8 port_num = entry->attr.port_num;
+ u32 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
- dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
+ dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__,
port_num, entry->attr.index, entry->attr.gid.raw);
write_lock_irq(&table->rwlock);
@@ -282,7 +282,7 @@ static void free_gid_work(struct work_struct *work)
struct ib_gid_table_entry *entry =
container_of(work, struct ib_gid_table_entry, del_work);
struct ib_device *device = entry->attr.device;
- u8 port_num = entry->attr.port_num;
+ u32 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
mutex_lock(&table->lock);
@@ -379,7 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
* @ix: GID entry index to delete
*
*/
-static void del_gid(struct ib_device *ib_dev, u8 port,
+static void del_gid(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table, int ix)
{
struct roce_gid_ndev_storage *ndev_storage;
@@ -387,7 +387,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
lockdep_assert_held(&table->lock);
- dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
+ dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
ix, table->data_vec[ix]->attr.gid.raw);
write_lock_irq(&table->rwlock);
@@ -543,7 +543,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
addrconf_ifid_eui48(&gid->raw[8], dev);
}
-static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr,
unsigned long mask, bool default_gid)
{
@@ -587,7 +587,7 @@ out_unlock:
return ret;
}
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -598,7 +598,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
}
static int
-_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr,
unsigned long mask, bool default_gid)
{
@@ -627,7 +627,7 @@ out_unlock:
return ret;
}
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -638,7 +638,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *ndev)
{
struct ib_gid_table *table;
@@ -683,7 +683,7 @@ const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
const union ib_gid *gid,
enum ib_gid_type gid_type,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
int local_index;
struct ib_gid_table *table;
@@ -734,7 +734,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
*
*/
const struct ib_gid_attr *rdma_find_gid_by_filter(
- struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
+ struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
void *),
void *context)
@@ -818,7 +818,7 @@ static void release_gid_table(struct ib_device *device,
kfree(table);
}
-static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
+static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table)
{
int i;
@@ -834,7 +834,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
mutex_unlock(&table->lock);
}
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
struct net_device *ndev,
unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode)
@@ -867,7 +867,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
}
}
-static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
+static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
struct ib_gid_table *table)
{
unsigned int i;
@@ -884,7 +884,7 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
static void gid_table_release_one(struct ib_device *ib_dev)
{
- unsigned int p;
+ u32 p;
rdma_for_each_port (ib_dev, p) {
release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
@@ -895,7 +895,7 @@ static void gid_table_release_one(struct ib_device *ib_dev)
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
struct ib_gid_table *table;
- unsigned int rdma_port;
+ u32 rdma_port;
rdma_for_each_port (ib_dev, rdma_port) {
table = alloc_gid_table(
@@ -915,7 +915,7 @@ rollback_table_setup:
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
- unsigned int p;
+ u32 p;
rdma_for_each_port (ib_dev, p)
cleanup_gid_table_port(ib_dev, p,
@@ -950,7 +950,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
* Returns 0 on success or appropriate error code.
*
*/
-int rdma_query_gid(struct ib_device *device, u8 port_num,
+int rdma_query_gid(struct ib_device *device, u32 port_num,
int index, union ib_gid *gid)
{
struct ib_gid_table *table;
@@ -1014,7 +1014,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
unsigned long mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE;
struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
- unsigned int p;
+ u32 p;
if (ndev)
mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -1043,7 +1043,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
EXPORT_SYMBOL(rdma_find_gid);
int ib_get_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
int index,
u16 *pkey)
{
@@ -1069,9 +1069,8 @@ int ib_get_cached_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_pkey);
-int ib_get_cached_subnet_prefix(struct ib_device *device,
- u8 port_num,
- u64 *sn_pfx)
+int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
+ u64 *sn_pfx)
{
unsigned long flags;
@@ -1086,10 +1085,8 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
-int ib_find_cached_pkey(struct ib_device *device,
- u8 port_num,
- u16 pkey,
- u16 *index)
+int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
+ u16 pkey, u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
@@ -1116,8 +1113,9 @@ int ib_find_cached_pkey(struct ib_device *device,
*index = i;
ret = 0;
break;
- } else
+ } else {
partial_ix = i;
+ }
}
if (ret && partial_ix >= 0) {
@@ -1132,10 +1130,8 @@ err:
}
EXPORT_SYMBOL(ib_find_cached_pkey);
-int ib_find_exact_cached_pkey(struct ib_device *device,
- u8 port_num,
- u16 pkey,
- u16 *index)
+int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
+ u16 pkey, u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
@@ -1169,9 +1165,7 @@ err:
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
-int ib_get_cached_lmc(struct ib_device *device,
- u8 port_num,
- u8 *lmc)
+int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
unsigned long flags;
int ret = 0;
@@ -1187,8 +1181,7 @@ int ib_get_cached_lmc(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_lmc);
-int ib_get_cached_port_state(struct ib_device *device,
- u8 port_num,
+int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
enum ib_port_state *port_state)
{
unsigned long flags;
@@ -1222,7 +1215,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state);
* code.
*/
const struct ib_gid_attr *
-rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
+rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
struct ib_gid_table *table;
@@ -1263,7 +1256,7 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
const struct ib_gid_attr *gid_attr;
ssize_t num_entries = 0, ret;
struct ib_gid_table *table;
- unsigned int port_num, i;
+ u32 port_num, i;
struct net_device *ndev;
unsigned long flags;
@@ -1361,7 +1354,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
container_of(attr, struct ib_gid_table_entry, attr);
struct ib_device *device = entry->attr.device;
struct net_device *ndev = ERR_PTR(-EINVAL);
- u8 port_num = entry->attr.port_num;
+ u32 port_num = entry->attr.port_num;
struct ib_gid_table *table;
unsigned long flags;
bool valid;
@@ -1441,7 +1434,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
static int config_non_roce_gid_cache(struct ib_device *device,
- u8 port, int gid_tbl_len)
+ u32 port, int gid_tbl_len)
{
struct ib_gid_attr gid_attr = {};
struct ib_gid_table *table;
@@ -1472,7 +1465,7 @@ err:
}
static int
-ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
+ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
{
struct ib_port_attr *tprops = NULL;
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
@@ -1621,7 +1614,7 @@ EXPORT_SYMBOL(ib_dispatch_event);
int ib_cache_setup_one(struct ib_device *device)
{
- unsigned int p;
+ u32 p;
int err;
rwlock_init(&device->cache_lock);
@@ -1641,7 +1634,7 @@ int ib_cache_setup_one(struct ib_device *device)
void ib_cache_release_one(struct ib_device *device)
{
- unsigned int p;
+ u32 p;
/*
* The release function frees all the cache elements.
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 3d194bb60840..0ead0d223154 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -202,7 +202,7 @@ static struct attribute *cm_counter_default_attrs[] = {
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
- u8 port_num;
+ u32 port_num;
struct list_head cm_priv_prim_list;
struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
@@ -255,7 +255,8 @@ struct cm_id_private {
struct completion comp;
refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
- * Protected by the cm.lock spinlock. */
+ * Protected by the cm.lock spinlock.
+ */
int listen_sharecount;
struct rcu_head rcu;
@@ -420,8 +421,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
return 0;
}
-static void * cm_copy_private_data(const void *private_data,
- u8 private_data_len)
+static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
void *data;
@@ -680,8 +680,8 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
return cm_id_priv;
}
-static struct cm_id_private * cm_find_listen(struct ib_device *device,
- __be64 service_id)
+static struct cm_id_private *cm_find_listen(struct ib_device *device,
+ __be64 service_id)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
@@ -708,8 +708,8 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
return NULL;
}
-static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
- *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
struct rb_node **link = &cm.remote_id_table.rb_node;
struct rb_node *parent = NULL;
@@ -767,8 +767,8 @@ static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
return res;
}
-static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
- *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
struct rb_node **link = &cm.remote_qp_table.rb_node;
struct rb_node *parent = NULL;
@@ -797,8 +797,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
return NULL;
}
-static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
- *cm_id_priv)
+static struct cm_id_private *
+cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
struct rb_node **link = &cm.remote_sidr_table.rb_node;
struct rb_node *parent = NULL;
@@ -897,7 +897,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
}
EXPORT_SYMBOL(ib_create_cm_id);
-static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
+static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
struct cm_work *work;
@@ -986,7 +986,7 @@ static void cm_remove_remote(struct cm_id_private *cm_id_priv)
}
}
-static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
+static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
struct cm_timewait_info *timewait_info;
@@ -1631,7 +1631,7 @@ static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
req_msg))));
}
-static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
+static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
struct sa_path_rec *path, union ib_gid *gid)
{
if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
@@ -1750,7 +1750,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
static u16 cm_get_bth_pkey(struct cm_work *work)
{
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
- u8 port_num = work->port->port_num;
+ u32 port_num = work->port->port_num;
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
u16 pkey;
int ret;
@@ -1778,7 +1778,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work,
struct sa_path_rec *path)
{
struct ib_device *dev = work->port->cm_dev->ib_device;
- u8 port_num = work->port->port_num;
+ u32 port_num = work->port->port_num;
if (rdma_cap_opa_ah(dev, port_num) &&
(ib_is_opa_gid(&path->sgid))) {
@@ -1977,8 +1977,8 @@ unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
}
-static struct cm_id_private * cm_match_req(struct cm_work *work,
- struct cm_id_private *cm_id_priv)
+static struct cm_id_private *cm_match_req(struct cm_work *work,
+ struct cm_id_private *cm_id_priv)
{
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
@@ -2138,20 +2138,17 @@ static int cm_req_handler(struct cm_work *work)
goto destroy;
}
- cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
-
memset(&work->path[0], 0, sizeof(work->path[0]));
if (cm_req_has_alt_path(req_msg))
memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
gid_attr = grh->sgid_attr;
- if (gid_attr &&
- rdma_protocol_roce(work->port->cm_dev->ib_device,
- work->port->port_num)) {
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
work->path[0].rec_type =
sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
} else {
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_path_set_rec_type(
work->port->cm_dev->ib_device, work->port->port_num,
&work->path[0],
@@ -2993,7 +2990,7 @@ static void cm_format_rej_event(struct cm_work *work)
IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
-static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
+static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
struct cm_id_private *cm_id_priv;
__be32 remote_id;
@@ -3098,7 +3095,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch(cm_id_priv->id.state) {
+ switch (cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
@@ -3155,7 +3152,7 @@ error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_mra);
-static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
+static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
case CM_MSG_RESPONSE_REQ:
@@ -3917,8 +3914,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state)
- {
+ switch (cm_id->state) {
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
cm_id->state = IB_CM_ESTABLISHED;
@@ -4334,7 +4330,7 @@ static int cm_add_one(struct ib_device *ib_device)
unsigned long flags;
int ret;
int count = 0;
- unsigned int i;
+ u32 i;
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
@@ -4432,7 +4428,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
.clr_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
- unsigned int i;
+ u32 i;
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 0cc40656b5c5..8462de7ca26e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -22,7 +22,7 @@
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg);
- switch(transport_type) {
+ switch (transport_type) {
case 0: return IB_QPT_RC;
case 1: return IB_QPT_UC;
case 3:
@@ -37,7 +37,7 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
enum ib_qp_type qp_type)
{
- switch(qp_type) {
+ switch (qp_type) {
case IB_QPT_UC:
IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1);
break;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 94096511599f..2b9ffc21cbc4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -43,7 +43,6 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
-#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
@@ -219,14 +218,6 @@ struct rdma_bind_list {
unsigned short port;
};
-struct class_port_info_context {
- struct ib_class_port_info *class_port_info;
- struct ib_device *device;
- struct completion done;
- struct ib_sa_query *sa_query;
- u8 port_num;
-};
-
static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
@@ -287,7 +278,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
- unsigned int port)
+ u32 port)
{
if (!rdma_is_port_valid(cma_dev->device, port))
return -EINVAL;
@@ -296,7 +287,7 @@ int cma_get_default_gid_type(struct cma_device *cma_dev,
}
int cma_set_default_gid_type(struct cma_device *cma_dev,
- unsigned int port,
+ u32 port,
enum ib_gid_type default_gid_type)
{
unsigned long supported_gids;
@@ -319,7 +310,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
return 0;
}
-int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
+int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
if (!rdma_is_port_valid(cma_dev->device, port))
return -EINVAL;
@@ -327,7 +318,7 @@ int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}
-int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
+int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
u8 default_roce_tos)
{
if (!rdma_is_port_valid(cma_dev->device, port))
@@ -463,7 +454,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- rdma_restrack_add(&id_priv->res);
trace_cm_id_attach(id_priv, cma_dev->device);
}
@@ -562,7 +552,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
}
static const struct ib_gid_attr *
-cma_validate_port(struct ib_device *device, u8 port,
+cma_validate_port(struct ib_device *device, u32 port,
enum ib_gid_type gid_type,
union ib_gid *gid,
struct rdma_id_private *id_priv)
@@ -620,7 +610,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
- unsigned int port;
+ u32 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -700,6 +690,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
mutex_lock(&lock);
cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
mutex_unlock(&lock);
+ rdma_restrack_add(&id_priv->res);
return 0;
}
@@ -711,8 +702,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
- unsigned int port;
union ib_gid gid;
+ u32 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -754,8 +745,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
}
out:
- if (!ret)
+ if (!ret) {
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
+ }
mutex_unlock(&lock);
return ret;
@@ -816,6 +809,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
found:
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
mutex_unlock(&lock);
addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
@@ -852,6 +846,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
id_priv->id.qp_type = qp_type;
id_priv->tos_set = false;
id_priv->timeout_set = false;
+ id_priv->min_rnr_timer_set = false;
id_priv->gid_type = IB_GID_TYPE_IB;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
@@ -1135,12 +1130,16 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
qp_attr_mask);
qp_attr->port_num = id_priv->id.port_num;
*qp_attr_mask |= IB_QP_PORT;
- } else
+ } else {
ret = -ENOSYS;
+ }
if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
qp_attr->timeout = id_priv->timeout;
+ if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
+ qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
+
return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
@@ -1581,7 +1580,7 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
struct ib_device *device = id->device;
- const int port_num = id->port_num ?: rdma_start_port(device);
+ const u32 port_num = id->port_num ?: rdma_start_port(device);
return rdma_protocol_roce(device, port_num);
}
@@ -2474,6 +2473,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
id->tos = id_priv->tos;
id->tos_set = id_priv->tos_set;
+ id->afonly = id_priv->afonly;
id_priv->cm_id.iw = id;
memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -2529,6 +2529,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
rdma_addr_size(cma_src_addr(id_priv)));
_cma_attach_to_dev(dev_id_priv, cma_dev);
+ rdma_restrack_add(&dev_id_priv->res);
cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
@@ -2615,6 +2616,43 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
}
EXPORT_SYMBOL(rdma_set_ack_timeout);
+/**
+ * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
+ * QP associated with a connection identifier.
+ * @id: Communication identifier associated with the QP.
+ * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
+ * Timer Field" in the IBTA specification.
+ *
+ * This function should be called before rdma_connect() on active
+ * side, and on passive side before rdma_accept(). The timer value
+ * will be associated with the local QP. When it receives a send it is
+ * not read to handle, typically if the receive queue is empty, an RNR
+ * Retry NAK is returned to the requester with the min_rnr_timer
+ * encoded. The requester will then wait at least the time specified
+ * in the NAK before retrying. The default is zero, which translates
+ * to a minimum RNR Timer value of 655 ms.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
+{
+ struct rdma_id_private *id_priv;
+
+ /* It is a five-bit value */
+ if (min_rnr_timer & 0xe0)
+ return -EINVAL;
+
+ if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
+ return -EINVAL;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ id_priv->min_rnr_timer = min_rnr_timer;
+ id_priv->min_rnr_timer_set = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_set_min_rnr_timer);
+
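
On the passive side the new call slots in just before accepting; a sketch (cm_id and conn_param assumed to come from the surrounding CM event handler):

/* min_rnr_timer is the raw 5-bit IBTA Table 45 encoding (0..31);
 * anything wider is rejected with -EINVAL. */
ret = rdma_set_min_rnr_timer(cm_id, min_rnr_timer);
if (ret)
	goto reject;

ret = rdma_accept(cm_id, &conn_param);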
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
void *context)
{
@@ -3169,6 +3207,7 @@ port_found:
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
+ rdma_restrack_add(&id_priv->res);
cma_set_loopback(cma_src_addr(id_priv));
out:
mutex_unlock(&lock);
@@ -3201,6 +3240,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (status)
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
status);
+ rdma_restrack_add(&id_priv->res);
} else if (status) {
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
}
@@ -3812,6 +3852,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err2;
+ if (!cma_any_addr(addr))
+ rdma_restrack_add(&id_priv->res);
return 0;
err2:
if (id_priv->cma_dev)
@@ -4124,10 +4166,11 @@ int rdma_connect_locked(struct rdma_cm_id *id,
ret = cma_resolve_ib_udp(id_priv, conn_param);
else
ret = cma_connect_ib(id_priv, conn_param);
- } else if (rdma_cap_iw_cm(id->device, id->port_num))
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = cma_connect_iw(id_priv, conn_param);
- else
+ } else {
ret = -ENOSYS;
+ }
if (ret)
goto err_state;
return 0;
@@ -4234,9 +4277,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
iw_param.ird = conn_param->responder_resources;
iw_param.private_data = conn_param->private_data;
iw_param.private_data_len = conn_param->private_data_len;
- if (id_priv->id.qp) {
+ if (id_priv->id.qp)
iw_param.qpn = id_priv->qp_num;
- } else
+ else
iw_param.qpn = conn_param->qp_num;
return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
@@ -4319,11 +4362,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
else
ret = cma_rep_recv(id_priv);
}
- } else if (rdma_cap_iw_cm(id->device, id->port_num))
+ } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = cma_accept_iw(id_priv, conn_param);
- else
+ } else {
ret = -ENOSYS;
-
+ }
if (ret)
goto reject;
@@ -4409,8 +4452,9 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
private_data, private_data_len);
- } else
+ } else {
ret = -ENOSYS;
+ }
return ret;
}
@@ -4864,14 +4908,28 @@ static void cma_process_remove(struct cma_device *cma_dev)
wait_for_completion(&cma_dev->comp);
}
+static bool cma_supported(struct ib_device *device)
+{
+ u32 i;
+
+ rdma_for_each_port(device, i) {
+ if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
+ return true;
+ }
+ return false;
+}
+
static int cma_add_one(struct ib_device *device)
{
struct rdma_id_private *to_destroy;
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
- unsigned int i;
unsigned long supported_gids = 0;
int ret;
+ u32 i;
+
+ if (!cma_supported(device))
+ return -EOPNOTSUPP;
cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
if (!cma_dev)
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index e0d5e3bae458..9ac16e0db761 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -43,7 +43,7 @@ struct cma_device;
struct cma_dev_group;
struct cma_dev_port_group {
- unsigned int port_num;
+ u32 port_num;
struct cma_dev_group *cma_dev_group;
struct config_group group;
};
@@ -200,10 +200,10 @@ static const struct config_item_type cma_port_group_type = {
static int make_cma_ports(struct cma_dev_group *cma_dev_group,
struct cma_device *cma_dev)
{
- struct ib_device *ibdev;
- unsigned int i;
- unsigned int ports_num;
struct cma_dev_port_group *ports;
+ struct ib_device *ibdev;
+ u32 ports_num;
+ u32 i;
ibdev = cma_get_ib_dev(cma_dev);
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index caece96ebcf5..5c463da99845 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -86,9 +86,11 @@ struct rdma_id_private {
u8 tos;
u8 tos_set:1;
u8 timeout_set:1;
+ u8 min_rnr_timer_set:1;
u8 reuseaddr;
u8 afonly;
u8 timeout;
+ u8 min_rnr_timer;
enum ib_gid_type gid_type;
/*
@@ -117,11 +119,11 @@ void cma_dev_put(struct cma_device *dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
void *cookie);
-int cma_get_default_gid_type(struct cma_device *dev, unsigned int port);
-int cma_set_default_gid_type(struct cma_device *dev, unsigned int port,
+int cma_get_default_gid_type(struct cma_device *dev, u32 port);
+int cma_set_default_gid_type(struct cma_device *dev, u32 port,
enum ib_gid_type default_gid_type);
-int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port);
-int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port,
+int cma_get_default_roce_tos(struct cma_device *dev, u32 port);
+int cma_set_default_roce_tos(struct cma_device *dev, u32 port,
u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *dev);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 315f7a297eee..29809dd30041 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -83,14 +83,14 @@ void ib_device_unregister_sysfs(struct ib_device *device);
int ib_device_rename(struct ib_device *ibdev, const char *name);
int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);
-typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
+typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port,
struct net_device *idev, void *cookie);
-typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
+typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port,
struct net_device *idev, void *cookie);
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
- unsigned int port);
+ u32 port);
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
@@ -113,7 +113,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
struct ib_client_nl_info {
struct sk_buff *nl_msg;
struct device *cdev;
- unsigned int port;
+ u32 port;
u64 abi;
};
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
@@ -128,24 +128,24 @@ int ib_cache_gid_parse_type_str(const char *buf);
const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
struct net_device *ndev,
unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode);
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr);
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
union ib_gid *gid, struct ib_gid_attr *attr);
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *ndev);
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
@@ -215,14 +215,14 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
struct netlink_ext_ack *extack);
int ib_get_cached_subnet_prefix(struct ib_device *device,
- u8 port_num,
- u64 *sn_pfx);
+ u32 port_num,
+ u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_release_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix);
int ib_security_modify_qp(struct ib_qp *qp,
@@ -247,7 +247,7 @@ static inline void ib_security_release_port_pkey_list(struct ib_device *device)
}
static inline void ib_security_cache_change(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix)
{
}
@@ -381,7 +381,7 @@ int ib_setup_port_attrs(struct ib_core_device *coredev);
int rdma_compatdev_set(u8 enable);
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
struct kobject *kobj, struct kobj_type *ktype,
const char *name);
void ib_port_unregister_module_stat(struct kobject *kobj);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index f3a7c1f404af..15493357cfef 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -14,10 +14,12 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
enum rdma_nl_counter_mode new_mode,
enum rdma_nl_counter_mask new_mask)
{
- if (new_mode == RDMA_COUNTER_MODE_AUTO && port_counter->num_counters)
- if (new_mask & ~ALL_AUTO_MODE_MASKS ||
- port_counter->mode.mode != RDMA_COUNTER_MODE_NONE)
+ if (new_mode == RDMA_COUNTER_MODE_AUTO) {
+ if (new_mask & (~ALL_AUTO_MODE_MASKS))
return -EINVAL;
+ if (port_counter->num_counters)
+ return -EBUSY;
+ }
port_counter->mode.mode = new_mode;
port_counter->mode.mask = new_mask;
@@ -32,14 +34,17 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
* @mask: Mask to configure
* @extack: Message to the user
*
- * Return 0 on success.
+ * Return 0 on success. If the counter mode wasn't changed then it is
+ * considered a success as well.
+ * Return -EBUSY when changing to auto mode while there are bound counters.
+ *
*/
-int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mask mask,
struct netlink_ext_ack *extack)
{
- enum rdma_nl_counter_mode mode = RDMA_COUNTER_MODE_AUTO;
struct rdma_port_counter *port_counter;
+ enum rdma_nl_counter_mode mode;
int ret;
port_counter = &dev->port_data[port].port_counter;
@@ -47,25 +52,26 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
return -EOPNOTSUPP;
mutex_lock(&port_counter->lock);
- if (mask) {
- ret = __counter_set_mode(port_counter, mode, mask);
- if (ret)
- NL_SET_ERR_MSG(
- extack,
- "Turning on auto mode is not allowed when there is bound QP");
+ if (mask)
+ mode = RDMA_COUNTER_MODE_AUTO;
+ else
+ mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
+ RDMA_COUNTER_MODE_NONE;
+
+ if (port_counter->mode.mode == mode &&
+ port_counter->mode.mask == mask) {
+ ret = 0;
goto out;
}
- if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
- ret = -EINVAL;
- goto out;
- }
+ ret = __counter_set_mode(port_counter, mode, mask);
- mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
- RDMA_COUNTER_MODE_NONE;
- ret = __counter_set_mode(port_counter, mode, 0);
out:
mutex_unlock(&port_counter->lock);
+ if (ret == -EBUSY)
+ NL_SET_ERR_MSG(
+ extack,
+ "Modifying auto mode is not allowed when there is a bound QP");
return ret;
}
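
After the rewrite the mode resolution reduces to a small decision table; summarized (a reading of the code above, not a quote from the changelog):

/*
 * mask != 0                    -> target mode AUTO
 * mask == 0, counters exist    -> target mode MANUAL
 * mask == 0, no counters       -> target mode NONE
 * (mode, mask) unchanged       -> return 0, nothing touched
 * AUTO while QPs already bound -> -EBUSY from __counter_set_mode()
 */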
@@ -100,7 +106,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
return ret;
}
-static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port,
+static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
struct ib_qp *qp,
enum rdma_nl_counter_mode mode)
{
@@ -238,7 +244,7 @@ static void counter_history_stat_update(struct rdma_counter *counter)
* Return: The counter (with ref-count increased) if found
*/
static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
- u8 port)
+ u32 port)
{
struct rdma_port_counter *port_counter;
struct rdma_counter *counter = NULL;
@@ -282,7 +288,7 @@ static void counter_release(struct kref *kref)
* rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on
* the auto-mode rule
*/
-int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
{
struct rdma_port_counter *port_counter;
struct ib_device *dev = qp->device;
@@ -352,7 +358,7 @@ int rdma_counter_query_stats(struct rdma_counter *counter)
}
static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
- u8 port, u32 index)
+ u32 port, u32 index)
{
struct rdma_restrack_entry *res;
struct rdma_restrack_root *rt;
@@ -388,7 +394,7 @@ next:
* rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
* specific port, including the running ones and history data
*/
-u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index)
{
struct rdma_port_counter *port_counter;
u64 sum;
@@ -443,7 +449,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
/*
* rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
*/
-int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id)
{
struct rdma_port_counter *port_counter;
@@ -493,7 +499,7 @@ err:
* rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it
* The id of new counter is returned in @counter_id
*/
-int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
u32 qp_num, u32 *counter_id)
{
struct rdma_port_counter *port_counter;
@@ -540,7 +546,7 @@ err:
/*
* rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
*/
-int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id)
{
struct rdma_port_counter *port_counter;
@@ -573,7 +579,7 @@ out:
return ret;
}
-int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
enum rdma_nl_counter_mask *mask)
{
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index aac0fe14e1d9..c660cef66ac6 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -779,7 +779,7 @@ static void remove_client_context(struct ib_device *device,
static int alloc_port_data(struct ib_device *device)
{
struct ib_port_data_rcu *pdata_rcu;
- unsigned int port;
+ u32 port;
if (device->port_data)
return 0;
@@ -788,6 +788,10 @@ static int alloc_port_data(struct ib_device *device)
if (WARN_ON(!device->phys_port_cnt))
return -EINVAL;
+ /* Reserve U32_MAX so the logic to go over all the ports is sane */
+ if (WARN_ON(device->phys_port_cnt == U32_MAX))
+ return -EINVAL;
+
/*
* device->port_data is indexed directly by the port number to make
* access to this data as efficient as possible.
@@ -819,7 +823,7 @@ static int alloc_port_data(struct ib_device *device)
return 0;
}
-static int verify_immutable(const struct ib_device *dev, u8 port)
+static int verify_immutable(const struct ib_device *dev, u32 port)
{
return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
rdma_max_mad_size(dev, port) != 0);
@@ -827,7 +831,7 @@ static int verify_immutable(const struct ib_device *dev, u8 port)
static int setup_port_data(struct ib_device *device)
{
- unsigned int port;
+ u32 port;
int ret;
ret = alloc_port_data(device);
@@ -2005,7 +2009,7 @@ void ib_dispatch_event_clients(struct ib_event *event)
}
static int iw_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
struct in_device *inetdev;
@@ -2044,7 +2048,7 @@ static int iw_query_port(struct ib_device *device,
}
static int __ib_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
union ib_gid gid = {};
@@ -2078,7 +2082,7 @@ static int __ib_query_port(struct ib_device *device,
* @port_attr pointer.
*/
int ib_query_port(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct ib_port_attr *port_attr)
{
if (!rdma_is_port_valid(device, port_num))
@@ -2130,7 +2134,7 @@ static void add_ndev_hash(struct ib_port_data *pdata)
* NETDEV_UNREGISTER event.
*/
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
- unsigned int port)
+ u32 port)
{
struct net_device *old_ndev;
struct ib_port_data *pdata;
@@ -2173,7 +2177,7 @@ EXPORT_SYMBOL(ib_device_set_netdev);
static void free_netdevs(struct ib_device *ib_dev)
{
unsigned long flags;
- unsigned int port;
+ u32 port;
if (!ib_dev->port_data)
return;
@@ -2204,7 +2208,7 @@ static void free_netdevs(struct ib_device *ib_dev)
}
struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
- unsigned int port)
+ u32 port)
{
struct ib_port_data *pdata;
struct net_device *res;
@@ -2291,7 +2295,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_callback cb,
void *cookie)
{
- unsigned int port;
+ u32 port;
rdma_for_each_port (ib_dev, port)
if (rdma_protocol_roce(ib_dev, port)) {
@@ -2369,7 +2373,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
* ib_query_pkey() fetches the specified P_Key table entry.
*/
int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
+ u32 port_num, u16 index, u16 *pkey)
{
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
@@ -2414,7 +2418,7 @@ EXPORT_SYMBOL(ib_modify_device);
* @port_modify_mask and @port_modify structure.
*/
int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
+ u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
int rc;
@@ -2446,10 +2450,10 @@ EXPORT_SYMBOL(ib_modify_port);
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index)
+ u32 *port_num, u16 *index)
{
union ib_gid tmp_gid;
- unsigned int port;
+ u32 port;
int ret, i;
rdma_for_each_port (device, port) {
@@ -2483,7 +2487,7 @@ EXPORT_SYMBOL(ib_find_gid);
* @index: The index into the PKey table where the PKey was found.
*/
int ib_find_pkey(struct ib_device *device,
- u8 port_num, u16 pkey, u16 *index)
+ u32 port_num, u16 pkey, u16 *index)
{
int ret, i;
u16 tmp_pkey;
@@ -2526,7 +2530,7 @@ EXPORT_SYMBOL(ib_find_pkey);
*
*/
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
- u8 port,
+ u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr)
@@ -2696,7 +2700,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, reg_dm_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
- SET_DEVICE_OP(dev_ops, req_ncomp_notif);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
SET_DEVICE_OP(dev_ops, resize_cq);
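
The widening also explains the new U32_MAX rejection in alloc_port_data(): the port iterators run with an inclusive upper bound, and no u32 value exceeds U32_MAX, so with phys_port_cnt == U32_MAX the terminating comparison could never become false. A simplified sketch of the iteration shape (assumed here; the real rdma_for_each_port() macro additionally type-checks its iterator):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for rdma_for_each_port() */
#define for_each_port(port, end) \
	for ((port) = 1; (port) <= (end); (port)++)

int main(void)
{
	/* must stay below UINT32_MAX: with end == UINT32_MAX the condition
	 * "port <= end" is always true and the loop cannot exit */
	uint32_t end = 5;
	uint32_t port;

	for_each_port(port, end)
		printf("port %u\n", port);
	return 0;
}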
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30a0ff76b332..932b26f50d03 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -528,7 +528,8 @@ add_mapping_response_exit:
}
/* netlink attribute policy for the response to add and query mapping request
- * and response with remote address info */
+ * and response with remote address info
+ */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
[IWPM_NLA_RQUERY_MAPPING_SEQ] = { .type = NLA_U32 },
[IWPM_NLA_RQUERY_LOCAL_ADDR] = {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9355e521d9f4..2081e4854fb0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -61,7 +61,7 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
{
u16 pkey;
struct ib_device *dev = qp_info->port_priv->device;
- u8 pnum = qp_info->port_priv->port_num;
+ u32 pnum = qp_info->port_priv->port_num;
struct ib_ud_wr *wr = &mad_send_wr->send_wr;
struct rdma_ah_attr attr = {};
@@ -118,7 +118,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
* Assumes ib_mad_port_list_lock is being held
*/
static inline struct ib_mad_port_private *
-__ib_get_mad_port(struct ib_device *device, int port_num)
+__ib_get_mad_port(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *entry;
@@ -134,7 +134,7 @@ __ib_get_mad_port(struct ib_device *device, int port_num)
* for a device/port
*/
static inline struct ib_mad_port_private *
-ib_get_mad_port(struct ib_device *device, int port_num)
+ib_get_mad_port(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *entry;
unsigned long flags;
@@ -155,8 +155,7 @@ static inline u8 convert_mgmt_class(u8 mgmt_class)
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
- switch (qp_type)
- {
+ switch (qp_type) {
case IB_QPT_SMI:
return 0;
case IB_QPT_GSI:
@@ -222,7 +221,7 @@ EXPORT_SYMBOL(ib_response_mad);
* Context: Process context.
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_qp_type qp_type,
struct ib_mad_reg_req *mad_reg_req,
u8 rmpp_version,
@@ -549,7 +548,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
}
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
- u16 pkey_index, u8 port_num, struct ib_wc *wc)
+ u16 pkey_index, u32 port_num, struct ib_wc *wc)
{
memset(wc, 0, sizeof *wc);
wc->wr_cqe = cqe;
@@ -608,7 +607,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_port_private *port_priv;
struct ib_mad_agent_private *recv_mad_agent = NULL;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num;
+ u32 port_num;
struct ib_wc mad_wc;
struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
@@ -707,8 +706,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
(const struct ib_mad *)smp,
(struct ib_mad *)mad_priv->mad, &mad_size,
&out_mad_pkey_index);
- switch (ret)
- {
+ switch (ret) {
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
mad_agent_priv->agent.recv_handler) {
@@ -807,7 +805,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
/* Allocate data segments. */
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
- seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
+ seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
if (!seg) {
free_send_rmpp_list(send_wr);
return -ENOMEM;
@@ -837,12 +835,11 @@ int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
-struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
- u32 remote_qpn, u16 pkey_index,
- int rmpp_active,
- int hdr_len, int data_len,
- gfp_t gfp_mask,
- u8 base_version)
+struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
+ u32 remote_qpn, u16 pkey_index,
+ int rmpp_active, int hdr_len,
+ int data_len, gfp_t gfp_mask,
+ u8 base_version)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1275,11 +1272,9 @@ static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
int i;
/* Remove any methods for this mad agent */
- for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
- if (method->agent[i] == agent) {
+ for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
+ if (method->agent[i] == agent)
method->agent[i] = NULL;
- }
- }
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
@@ -1454,9 +1449,8 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
* Was MAD registration request supplied
* with original registration ?
*/
- if (!agent_priv->reg_req) {
+ if (!agent_priv->reg_req)
goto out;
- }
port_priv = agent_priv->qp_info->port_priv;
mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
@@ -1613,7 +1607,7 @@ out:
if (mad_agent && !mad_agent->agent.recv_handler) {
dev_notice(&port_priv->device->dev,
- "No receive handler for client %p on port %d\n",
+ "No receive handler for client %p on port %u\n",
&mad_agent->agent, port_priv->port_num);
deref_mad_agent(mad_agent);
mad_agent = NULL;
@@ -1677,15 +1671,16 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
-static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
- const struct ib_mad_send_wr_private *wr,
- const struct ib_mad_recv_wc *rwc )
+static inline int
+rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+ const struct ib_mad_send_wr_private *wr,
+ const struct ib_mad_recv_wc *rwc)
{
struct rdma_ah_attr attr;
u8 send_resp, rcv_resp;
union ib_gid sgid;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num = mad_agent_priv->agent.port_num;
+ u32 port_num = mad_agent_priv->agent.port_num;
u8 lmc;
bool has_grh;
@@ -1834,7 +1829,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
deref_mad_agent(mad_agent_priv);
} else {
/* not user rmpp, revert to normal behavior and
- * drop the mad */
+ * drop the mad
+ */
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv);
return;
@@ -1860,14 +1856,12 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
mad_recv_wc);
deref_mad_agent(mad_agent_priv);
}
-
- return;
}
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
const struct ib_mad_qp_info *qp_info,
const struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response)
{
@@ -1954,7 +1948,7 @@ static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_qp_info *qp_info,
struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response)
{
@@ -2010,7 +2004,7 @@ static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_qp_info *qp_info,
struct ib_wc *wc,
- int port_num,
+ u32 port_num,
struct ib_mad_private *recv,
struct ib_mad_private *response,
bool opa)
@@ -2034,7 +2028,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_mad_private_header *mad_priv_hdr;
struct ib_mad_private *recv, *response = NULL;
struct ib_mad_agent_private *mad_agent;
- int port_num;
+ u32 port_num;
int ret = IB_MAD_RESULT_SUCCESS;
size_t mad_size;
u16 resp_mad_pkey_index = 0;
@@ -2202,9 +2196,10 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
temp_mad_send_wr->timeout))
break;
}
- }
- else
+ } else {
list_item = &mad_agent_priv->wait_list;
+ }
+
list_add(&mad_send_wr->agent_list, list_item);
/* Reschedule a work item if we have a shorter timeout */
@@ -2258,7 +2253,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
adjust_timeout(mad_agent_priv);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- if (mad_send_wr->status != IB_WC_SUCCESS )
+ if (mad_send_wr->status != IB_WC_SUCCESS)
mad_send_wc->status = mad_send_wr->status;
if (ret == IB_RMPP_RESULT_INTERNAL)
ib_rmpp_send_handler(mad_send_wc);
@@ -2947,7 +2942,7 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
* Create the QP, PD, MR, and CQ if needed
*/
static int ib_mad_port_open(struct ib_device *device,
- int port_num)
+ u32 port_num)
{
int ret, cq_size;
struct ib_mad_port_private *port_priv;
@@ -3002,7 +2997,7 @@ static int ib_mad_port_open(struct ib_device *device,
if (ret)
goto error7;
- snprintf(name, sizeof name, "ib_mad%d", port_num);
+ snprintf(name, sizeof(name), "ib_mad%u", port_num);
port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!port_priv->wq) {
ret = -ENOMEM;
@@ -3048,7 +3043,7 @@ error3:
* If there are no classes using the port, free the port
* resources (CQ, MR, PD, QP) and remove the port's info structure
*/
-static int ib_mad_port_close(struct ib_device *device, int port_num)
+static int ib_mad_port_close(struct ib_device *device, u32 port_num)
{
struct ib_mad_port_private *port_priv;
unsigned long flags;
@@ -3057,7 +3052,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
port_priv = __ib_get_mad_port(device, port_num);
if (port_priv == NULL) {
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
- dev_err(&device->dev, "Port %d not found\n", port_num);
+ dev_err(&device->dev, "Port %u not found\n", port_num);
return -ENODEV;
}
list_del_init(&port_priv->port_list);
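
Note that the format strings move from %d to %u in step with the type change; passing the now-unsigned port through %d would render values above INT_MAX as negative and trips -Wformat. A trivial illustration (hypothetical value):

#include <stdio.h>

int main(void)
{
	unsigned int port_num = 3000000000u;	/* hypothetical, > INT_MAX */

	printf("Port %u not found\n", port_num);	/* prints 3000000000 */
	/* printf("Port %d not found\n", port_num); would print a negative
	 * number and is flagged by -Wformat */
	return 0;
}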
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index e0573e4d0404..8af0619a39cd 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -382,8 +382,8 @@ static inline int get_seg_num(struct ib_mad_recv_buf *seg)
return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}
-static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
- struct ib_mad_recv_buf *seg)
+static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
+ struct ib_mad_recv_buf *seg)
{
if (seg->list.next == rmpp_list)
return NULL;
@@ -396,8 +396,8 @@ static inline int window_size(struct ib_mad_agent_private *agent)
return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}
-static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
- int seg_num)
+static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
+ int seg_num)
{
struct ib_mad_recv_buf *seg_buf;
int cur_seg_num;
@@ -449,7 +449,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
-static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
+static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
struct ib_mad_recv_wc *rmpp_wc;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 57519ca6cd2c..a5dd4b7a74bc 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -63,7 +63,7 @@ struct mcast_port {
struct rb_root table;
atomic_t refcount;
struct completion comp;
- u8 port_num;
+ u32 port_num;
};
struct mcast_device {
@@ -605,7 +605,7 @@ found:
*/
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
int (*callback)(int status,
@@ -690,7 +690,7 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
}
EXPORT_SYMBOL(ib_sa_free_multicast);
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
struct mcast_device *dev;
@@ -732,7 +732,7 @@ EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
* success or appropriate error code.
*
*/
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index d306049c22a2..34d0cc1a4147 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -92,7 +92,9 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED },
[RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED },
[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
.len = sizeof(struct __kernel_sockaddr_storage) },
[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
@@ -130,6 +132,11 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 },
@@ -146,6 +153,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 },
[RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
[RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -242,7 +250,7 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
char fw[IB_FW_VERSION_NAME_MAX];
int ret = 0;
- u8 port;
+ u32 port;
if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
@@ -385,6 +393,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
[RDMA_RESTRACK_CM_ID] = "cm_id",
[RDMA_RESTRACK_MR] = "mr",
[RDMA_RESTRACK_CTX] = "ctx",
+ [RDMA_RESTRACK_SRQ] = "srq",
};
struct nlattr *table_attr;
@@ -703,6 +712,135 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
err: return -EMSGSIZE;
}
+static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);
+
+ if (rdma_is_kernel_res(res))
+ return 0;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
+ return -EMSGSIZE;
+
+ return fill_res_name_pid(msg, res);
+}
+
+static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
+ uint32_t max_range)
+{
+ struct nlattr *entry_attr;
+
+ if (!min_range)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (min_range == max_range) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
+ goto err;
+ }
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
+{
+ uint32_t min_range = 0, prev = 0;
+ struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
+ struct nlattr *table_attr;
+ struct ib_qp *qp = NULL;
+ unsigned long id = 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ rt = &srq->device->res[RDMA_RESTRACK_QP];
+ xa_lock(&rt->xa);
+ xa_for_each(&rt->xa, id, res) {
+ if (!rdma_restrack_get(res))
+ continue;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
+ rdma_restrack_put(res);
+ continue;
+ }
+
+ if (qp->qp_num < prev)
+ /* qp_num should be ascending */
+ goto err_loop;
+
+ if (min_range == 0) {
+ min_range = qp->qp_num;
+ } else if (qp->qp_num > (prev + 1)) {
+ if (fill_res_range_qp_entry(msg, min_range, prev))
+ goto err_loop;
+
+ min_range = qp->qp_num;
+ }
+ prev = qp->qp_num;
+ rdma_restrack_put(res);
+ }
+
+ xa_unlock(&rt->xa);
+
+ if (fill_res_range_qp_entry(msg, min_range, prev))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_loop:
+ rdma_restrack_put(res);
+ xa_unlock(&rt->xa);
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_srq *srq = container_of(res, struct ib_srq, res);
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
+ goto err;
+
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
+ goto err;
+
+ if (ib_srq_has_cq(srq->srq_type)) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
+ srq->ext.cq->res.id))
+ goto err;
+ }
+
+ if (fill_res_srq_qps(msg, srq))
+ goto err;
+
+ return fill_res_name_pid(msg, res);
+
+err:
+ return -EMSGSIZE;
+}
+
static int fill_stat_counter_mode(struct sk_buff *msg,
struct rdma_counter *counter)
{
@@ -1236,6 +1374,19 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
},
+ [RDMA_RESTRACK_CTX] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CTXN,
+ },
+ [RDMA_RESTRACK_SRQ] = {
+ .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_SRQN,
+ },
+
};
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1476,6 +1627,8 @@ RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
+RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
+RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
@@ -1697,6 +1850,19 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_free(msg);
return err;
}
+
+ /*
+ * Copy-on-fork is supported.
+ * See commits:
+ * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
+ * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
+ * for more details. Don't backport this without them.
+ *
+ * Return value ignored on purpose, assume copy-on-fork is not
+ * supported in case of failure.
+ */
+ nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);
+
nlmsg_end(msg, nlh);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}
@@ -2139,6 +2305,14 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_res_get_pd_doit,
.dump = nldev_res_get_pd_dumpit,
},
+ [RDMA_NLDEV_CMD_RES_CTX_GET] = {
+ .doit = nldev_res_get_ctx_doit,
+ .dump = nldev_res_get_ctx_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_SRQ_GET] = {
+ .doit = nldev_res_get_srq_doit,
+ .dump = nldev_res_get_srq_dumpit,
+ },
[RDMA_NLDEV_CMD_SYS_GET] = {
.doit = nldev_sys_get_doit,
},
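
fill_res_srq_qps() above compresses the ascending list of QP numbers attached to an SRQ into ranges: it opens a range at the first QPN, closes it whenever a gap appears, and fill_res_range_qp_entry() collapses a single-element range into one LQPN attribute. A userspace sketch of the same walk (hypothetical QPNs; printf stands in for the netlink attributes):

#include <stdint.h>
#include <stdio.h>

static void emit_range(uint32_t min, uint32_t max)
{
	if (!min)
		return;			/* no open range */
	if (min == max)
		printf("qpn %u\n", min);
	else
		printf("qpn %u-%u\n", min, max);
}

int main(void)
{
	uint32_t qpns[] = { 4, 5, 6, 9, 10, 14 };	/* must be ascending */
	uint32_t min = 0, prev = 0;
	size_t i;

	for (i = 0; i < sizeof(qpns) / sizeof(qpns[0]); i++) {
		if (min == 0)
			min = qpns[i];			/* open a range */
		else if (qpns[i] > prev + 1) {
			emit_range(min, prev);		/* gap: close it */
			min = qpns[i];
		}
		prev = qpns[i];
	}
	emit_range(min, prev);				/* flush the tail */
	return 0;
}

With the data above this prints "qpn 4-6", "qpn 9-10" and "qpn 14", mirroring the RDMA_NLDEV_ATTR_MIN_RANGE/MAX_RANGE versus RDMA_NLDEV_ATTR_RES_LQPN split in the kernel code.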
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index af4879bdf3d6..64e2822af70f 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -40,11 +40,11 @@
#include "smi.h"
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt);
+ u32 port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- bool is_switch, int port_num);
+ bool is_switch, u32 port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 75eafd9208aa..94d83b665a2f 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -112,7 +112,7 @@ static void assert_uverbs_usecnt(struct ib_uobject *uobj,
* however the type's allocat_commit function cannot have been called and the
* uobject cannot be on the uobjects_lists
*
- * For RDMA_REMOVE_DESTROY the caller shold be holding a kref (eg via
+ * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
* rdma_lookup_get_uobject) and the object is left in a state where the caller
* needs to call rdma_lookup_put_uobject.
*
@@ -916,7 +916,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
}
/*
- * Destroy the uncontext and every uobject associated with it.
+ * Destroy the ucontext and every uobject associated with it.
*
* This is internally locked and can be called in parallel from multiple
* contexts.
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index ffabaf327242..033207882c82 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -47,6 +47,7 @@ static const char *type2str(enum rdma_restrack_type type)
[RDMA_RESTRACK_MR] = "MR",
[RDMA_RESTRACK_CTX] = "CTX",
[RDMA_RESTRACK_COUNTER] = "COUNTER",
+ [RDMA_RESTRACK_SRQ] = "SRQ",
};
return names[type];
@@ -141,6 +142,8 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
return container_of(res, struct ib_ucontext, res)->device;
case RDMA_RESTRACK_COUNTER:
return container_of(res, struct rdma_counter, res)->device;
+ case RDMA_RESTRACK_SRQ:
+ return container_of(res, struct ib_srq, res)->device;
default:
WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
return NULL;
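
res_to_dev() depends on every tracked object embedding a struct rdma_restrack_entry member named res; container_of() then walks back to the enclosing object, now including SRQs. A self-contained sketch of the pattern (deliberately simplified struct layout):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct restrack_entry { int id; };

struct srq {				/* stand-in for struct ib_srq */
	const char *device;
	struct restrack_entry res;	/* embedded tracking entry */
};

int main(void)
{
	struct srq s = { .device = "mlx5_0", .res = { .id = 7 } };
	struct restrack_entry *res = &s.res;

	/* recover the owner from the embedded entry */
	printf("%s\n", container_of(res, struct srq, res)->device);
	return 0;
}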
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 34fff94eaa38..7b638d91a4ec 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -70,7 +70,7 @@ struct netdev_event_work {
};
static const struct {
- bool (*is_supported)(const struct ib_device *device, u8 port_num);
+ bool (*is_supported)(const struct ib_device *device, u32 port_num);
enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
@@ -79,7 +79,7 @@ static const struct {
#define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)
-unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)
{
int i;
unsigned int ret_flags = 0;
@@ -96,7 +96,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
EXPORT_SYMBOL(roce_gid_type_mask_support);
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
- u8 port, union ib_gid *gid,
+ u32 port, union ib_gid *gid,
struct ib_gid_attr *gid_attr)
{
int i;
@@ -144,7 +144,7 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
BONDING_SLAVE_STATE_NA)
static bool
-is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
+is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *real_dev;
@@ -168,7 +168,7 @@ is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
}
static bool
-is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
+is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *master_dev;
@@ -197,7 +197,7 @@ is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
* considered for deriving default RoCE GID, returns false otherwise.
*/
static bool
-is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
+is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *cookie_ndev = cookie;
@@ -223,13 +223,13 @@ is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
return res;
}
-static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
+static bool pass_all_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
return true;
}
-static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
+static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
bool res;
@@ -260,7 +260,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
* not have been established as slave device yet.
*/
static bool
-is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
+is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -280,7 +280,7 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
static void update_gid_ip(enum gid_op_type gid_op,
struct ib_device *ib_dev,
- u8 port, struct net_device *ndev,
+ u32 port, struct net_device *ndev,
struct sockaddr *addr)
{
union ib_gid gid;
@@ -294,7 +294,7 @@ static void update_gid_ip(enum gid_op_type gid_op,
}
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
struct net_device *event_ndev)
{
@@ -328,7 +328,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
}
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
const struct in_ifaddr *ifa;
struct in_device *in_dev;
@@ -372,7 +372,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
}
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
- u8 port, struct net_device *ndev)
+ u32 port, struct net_device *ndev)
{
struct inet6_ifaddr *ifp;
struct inet6_dev *in6_dev;
@@ -417,7 +417,7 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
}
}
-static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void _add_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *ndev)
{
enum_netdev_ipv4_ips(ib_dev, port, ndev);
@@ -425,13 +425,13 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
enum_netdev_ipv6_ips(ib_dev, port, ndev);
}
-static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
_add_netdev_ips(ib_dev, port, cookie);
}
-static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
@@ -446,7 +446,7 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
*
* del_default_gids() deletes the default GIDs of the event/cookie netdevice.
*/
-static void del_default_gids(struct ib_device *ib_dev, u8 port,
+static void del_default_gids(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *cookie_ndev = cookie;
@@ -458,7 +458,7 @@ static void del_default_gids(struct ib_device *ib_dev, u8 port,
IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
-static void add_default_gids(struct ib_device *ib_dev, u8 port,
+static void add_default_gids(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
struct net_device *event_ndev = cookie;
@@ -470,7 +470,7 @@ static void add_default_gids(struct ib_device *ib_dev, u8 port,
}
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -515,7 +515,7 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev)
EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
- u8 port,
+ u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
@@ -547,10 +547,10 @@ static int netdev_upper_walk(struct net_device *upper,
return 0;
}
-static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
void *cookie,
void (*handle_netdev)(struct ib_device *ib_dev,
- u8 port,
+ u32 port,
struct net_device *ndev))
{
struct net_device *ndev = cookie;
@@ -574,25 +574,25 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
}
}
-static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
struct net_device *event_ndev)
{
ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}
-static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}
-static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev, void *cookie)
{
handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}
-static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
struct net_device *rdma_ndev,
void *cookie)
{
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 31156e22d3e7..a588c2038479 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -25,7 +25,7 @@ MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
* registration is also enabled if registering memory might yield better
* performance than using multiple SGE entries, see rdma_rw_io_needs_mr()
*/
-static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
+static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
{
if (rdma_protocol_iwarp(dev, port_num))
return true;
@@ -42,7 +42,7 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
* optimization otherwise. Additionally we have a debug option to force usage
* of MRs to help testing this code path.
*/
-static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
+static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
enum dma_data_direction dir, int dma_nents)
{
if (dir == DMA_FROM_DEVICE) {
@@ -87,7 +87,7 @@ static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
}
/* Caller must have zero-initialized *reg. */
-static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
u32 sg_cnt, u32 offset)
{
@@ -121,7 +121,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
}
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct rdma_rw_reg_ctx *prev = NULL;
@@ -308,7 +308,7 @@ static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
* Returns the number of WQEs that will be needed on the workqueue if
* successful, or a negative error code.
*/
-int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
@@ -377,7 +377,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_init);
* successful, or a negative error code.
*/
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
struct ib_sig_attrs *sig_attrs,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
@@ -505,7 +505,7 @@ static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
* completion notification.
*/
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+ u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
struct ib_send_wr *first_wr, *last_wr;
int i;
@@ -562,7 +562,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
* is not set @cqe must be set so that the caller gets a completion
* notification.
*/
-int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
struct ib_send_wr *first_wr;
@@ -581,8 +581,9 @@ EXPORT_SYMBOL(rdma_rw_ctx_post);
* @sg_cnt: number of entries in @sg
* @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
*/
-void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
- struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir)
{
int i;
@@ -620,7 +621,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy);
* @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
*/
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
enum dma_data_direction dir)
{
@@ -647,7 +648,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
* compute max_rdma_ctxts and the size of the transport's Send and
* Send Completion Queues.
*/
-unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
unsigned int maxpages)
{
unsigned int mr_pages;
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index cbaaaa92fff3..143de37ae598 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -49,7 +49,7 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
+ struct ib_device *device, u32 port_num, u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
unsigned long timeout_ms, gfp_t gfp_mask,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9ef1a355131b..8f1705c403b4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -95,7 +95,7 @@ struct ib_sa_port {
struct delayed_work ib_cpi_work;
spinlock_t classport_lock; /* protects class port info set */
spinlock_t ah_lock;
- u8 port_num;
+ u32 port_num;
};
struct ib_sa_device {
@@ -1194,7 +1194,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
}
EXPORT_SYMBOL(ib_sa_cancel_query);
-static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
+static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
struct ib_sa_device *sa_dev;
struct ib_sa_port *port;
@@ -1213,7 +1213,7 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
+static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr)
@@ -1251,7 +1251,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
* User must invoke rdma_destroy_ah_attr() to release reference to SGID
* attributes which are initialized using ib_init_ah_attr_from_path().
*/
-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr)
@@ -1409,7 +1409,7 @@ EXPORT_SYMBOL(ib_sa_pack_path);
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
- u8 port_num)
+ u32 port_num)
{
struct ib_sa_port *port;
unsigned long flags;
@@ -1444,7 +1444,7 @@ enum opa_pr_supported {
*/
static int opa_pr_query_possible(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct sa_path_rec *rec)
{
struct ib_port_attr port_attr;
@@ -1533,7 +1533,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
* the query.
*/
int ib_sa_path_rec_get(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -1688,7 +1688,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
* the query.
*/
int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
+ struct ib_device *device, u32 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -1784,7 +1784,7 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
@@ -1876,7 +1876,7 @@ static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
unsigned long timeout_ms, gfp_t gfp_mask,
@@ -2265,7 +2265,7 @@ static void ib_sa_event(struct ib_event_handler *handler,
unsigned long flags;
struct ib_sa_device *sa_dev =
container_of(handler, typeof(*sa_dev), event_handler);
- u8 port_num = event->element.port_num - sa_dev->start_port;
+ u32 port_num = event->element.port_num - sa_dev->start_port;
struct ib_sa_port *port = &sa_dev->port[port_num];
if (!rdma_cap_ib_sa(handler->device, port->port_num))
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 75e7ec017836..e5a78d1a63c9 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -193,7 +193,7 @@ static void qp_to_error(struct ib_qp_security *sec)
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix)
{
struct ib_port_pkey *pp, *tmp_pp;
@@ -245,7 +245,7 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp)
struct pkey_index_qp_list *tmp_pkey;
struct pkey_index_qp_list *pkey;
struct ib_device *dev;
- u8 port_num = pp->port_num;
+ u32 port_num = pp->port_num;
int ret = 0;
if (pp->state != IB_PORT_PKEY_VALID)
@@ -538,7 +538,7 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
}
void ib_security_cache_change(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u64 subnet_prefix)
{
struct pkey_index_qp_list *pkey;
@@ -649,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
static int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
+ u32 port_num,
u16 pkey_index,
void *sec)
{
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index f19b23817c2b..45f09b75c893 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
#include "smi.h"
#include "opa_smi.h"
-static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
const u8 *return_path,
@@ -127,7 +127,7 @@ static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
* Return IB_SMI_DISCARD if the SMP should be discarded
*/
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- bool is_switch, int port_num)
+ bool is_switch, u32 port_num)
{
return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
@@ -139,7 +139,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- bool is_switch, int port_num)
+ bool is_switch, u32 port_num)
{
return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
OPA_LID_PERMISSIVE);
}
-static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num,
int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
@@ -238,7 +238,7 @@ static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt)
+ u32 port_num, int phys_port_cnt)
{
return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
@@ -254,7 +254,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt)
+ u32 port_num, int phys_port_cnt)
{
return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 91d9b353ab85..e350ed623c45 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -52,11 +52,11 @@ enum smi_forward_action {
};
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
- int port_num, int phys_port_cnt);
+ u32 port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- bool is_switch, int port_num);
+ bool is_switch, u32 port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b8abb30f80df..05b702de00e8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -62,7 +62,7 @@ struct ib_port {
const struct attribute_group *pma_table;
struct attribute_group *hw_stats_ag;
struct rdma_hw_stats *hw_stats;
- u8 port_num;
+ u32 port_num;
};
struct port_attribute {
@@ -94,7 +94,7 @@ struct hw_stats_attribute {
const char *buf,
size_t count);
int index;
- u8 port_num;
+ u32 port_num;
};
static ssize_t port_attr_show(struct kobject *kobj,
@@ -297,7 +297,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
{
- static const char * phys_state_str[] = {
+ static const char *phys_state_str[] = {
"<unknown>",
"Sleep",
"Polling",
@@ -470,14 +470,14 @@ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
struct port_table_attribute port_pma_attr_##_name = { \
.attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \
.index = (_offset) | ((_width) << 16) | ((_counter) << 24), \
- .attr_id = IB_PMA_PORT_COUNTERS , \
+ .attr_id = IB_PMA_PORT_COUNTERS, \
}
#define PORT_PMA_ATTR_EXT(_name, _width, _offset) \
struct port_table_attribute port_pma_attr_ext_##_name = { \
.attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \
.index = (_offset) | ((_width) << 16), \
- .attr_id = IB_PMA_PORT_COUNTERS_EXT , \
+ .attr_id = IB_PMA_PORT_COUNTERS_EXT, \
}
/*
@@ -812,7 +812,7 @@ static const struct attribute_group *get_counter_table(struct ib_device *dev,
}
static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
int ret;
@@ -938,7 +938,7 @@ static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
kfree(attr_group);
}
-static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+static struct attribute *alloc_hsa(int index, u32 port_num, const char *name)
{
struct hw_stats_attribute *hsa;
@@ -956,7 +956,7 @@ static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
return &hsa->attr;
}
-static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num)
{
struct hw_stats_attribute *hsa;
@@ -975,7 +975,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
}
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
- u8 port_num)
+ u32 port_num)
{
struct attribute_group *hsag;
struct rdma_hw_stats *stats;
@@ -1049,7 +1049,6 @@ err_free_hsag:
kfree(hsag);
err_free_stats:
kfree(stats);
- return;
}
static int add_port(struct ib_core_device *coredev, int port_num)
@@ -1075,9 +1074,8 @@ static int add_port(struct ib_core_device *coredev, int port_num)
ret = kobject_init_and_add(&p->kobj, &port_type,
coredev->ports_kobj,
"%d", port_num);
- if (ret) {
+ if (ret)
goto err_put;
- }
p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
if (!p->gid_attr_group) {
@@ -1088,9 +1086,8 @@ static int add_port(struct ib_core_device *coredev, int port_num)
p->gid_attr_group->port = p;
ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
&p->kobj, "gid_attrs");
- if (ret) {
+ if (ret)
goto err_put_gid_attrs;
- }
if (device->ops.process_mad && is_full_dev) {
p->pma_table = get_counter_table(device, port_num);
@@ -1383,7 +1380,7 @@ void ib_free_port_attrs(struct ib_core_device *coredev)
int ib_setup_port_attrs(struct ib_core_device *coredev)
{
struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
- unsigned int port;
+ u32 port;
int ret;
coredev->ports_kobj = kobject_create_and_add("ports",
@@ -1437,7 +1434,7 @@ void ib_device_unregister_sysfs(struct ib_device *device)
* @ktype: pointer to the ktype for this kobject.
* @name: the name of the kobject
*/
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
struct kobject *kobj, struct kobj_type *ktype,
const char *name)
{
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index da2512c30ffd..15d57ba4d07a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -231,7 +231,7 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
memcpy(dst->private_data, src->private_data,
src->private_data_len);
dst->private_data_len = src->private_data_len;
- dst->responder_resources =src->responder_resources;
+ dst->responder_resources = src->responder_resources;
dst->initiator_depth = src->initiator_depth;
dst->flow_control = src->flow_control;
dst->retry_count = src->retry_count;
@@ -1034,7 +1034,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
{
dst->private_data = src->private_data;
dst->private_data_len = src->private_data_len;
- dst->responder_resources =src->responder_resources;
+ dst->responder_resources = src->responder_resources;
dst->initiator_depth = src->initiator_depth;
dst->flow_control = src->flow_control;
dst->retry_count = src->retry_count;
@@ -1708,8 +1708,8 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
ssize_t ret;
if (!ib_safe_file_access(filp)) {
- pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
- task_tgid_vnr(current), current->comm);
+ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ __func__, task_tgid_vnr(current), current->comm);
return -EACCES;
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 9b607013e2a2..0eb40025075f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -100,10 +100,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
- /* At minimum, drivers must support PAGE_SIZE or smaller */
- if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
- return 0;
-
umem->iova = va = virt;
/* The best result is the smallest page size that results in the minimum
* number of required pages. Compute the largest page size that could
@@ -309,8 +305,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
int ret;
if (offset > umem->length || length > umem->length - offset) {
- pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
- offset, umem->length, end);
+ pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
+ __func__, offset, umem->length, end);
return -EINVAL;
}
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index f9b5162d9260..0d65ce146fc4 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -168,6 +168,10 @@ void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+ dma_resv_lock(dmabuf->resv, NULL);
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ dma_resv_unlock(dmabuf->resv);
+
dma_buf_detach(dmabuf, umem_dmabuf->attach);
dma_buf_put(dmabuf);
kfree(umem_dmabuf);
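
The two lines added to ib_umem_dmabuf_release() tear down in order: the scatterlist, if still mapped, is unmapped while the dma-buf reservation lock is held, and only then is the attachment detached and freed. A userspace analogue of that ordering (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

struct attach {
	pthread_mutex_t resv;	/* stands in for dmabuf->resv */
	int mapped;		/* stands in for the mapped sg table */
};

static void unmap_pages(struct attach *a)
{
	a->mapped = 0;		/* must only run with a->resv held */
}

static void release(struct attach *a)
{
	pthread_mutex_lock(&a->resv);
	unmap_pages(a);		/* the step this patch adds */
	pthread_mutex_unlock(&a->resv);
	/* only now is it safe to detach and free the attachment */
}

int main(void)
{
	struct attach a = { PTHREAD_MUTEX_INITIALIZER, 1 };

	release(&a);
	printf("mapped=%d\n", a.mapped);
	return 0;
}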
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index dd7f3b437c6b..852efedda798 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -101,7 +101,7 @@ struct ib_umad_port {
struct ib_device *ib_dev;
struct ib_umad_device *umad_dev;
int dev_num;
- u8 port_num;
+ u32 port_num;
};
struct ib_umad_device {
@@ -165,8 +165,8 @@ static void ib_umad_dev_put(struct ib_umad_device *dev)
static int hdr_size(struct ib_umad_file *file)
{
- return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
- sizeof (struct ib_user_mad_hdr_old);
+ return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) :
+ sizeof(struct ib_user_mad_hdr_old);
}
/* caller must hold file->mutex */
@@ -688,8 +688,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent: invalid device\n");
+ dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
ret = -EPIPE;
goto out;
}
@@ -701,7 +700,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(&file->port->dev,
- "ib_umad_reg_agent: invalid QPN %d specified\n",
+ "%s: invalid QPN %d specified\n", __func__,
ureq.qpn);
ret = -EINVAL;
goto out;
@@ -711,9 +710,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!__get_agent(file, agent_id))
goto found;
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent: Max Agents (%u) reached\n",
+ dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
IB_UMAD_MAX_AGENTS);
+
ret = -ENOMEM;
goto out;
@@ -790,8 +789,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent2: invalid device\n");
+ dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
ret = -EPIPE;
goto out;
}
@@ -802,17 +800,16 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent2: invalid QPN %d specified\n",
- ureq.qpn);
+ dev_notice(&file->port->dev, "%s: invalid QPN %d specified\n",
+ __func__, ureq.qpn);
ret = -EINVAL;
goto out;
}
if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
dev_notice(&file->port->dev,
- "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
- ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
+ "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n",
+ __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
ret = -EINVAL;
if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
@@ -827,8 +824,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
if (!__get_agent(file, agent_id))
goto found;
- dev_notice(&file->port->dev,
- "ib_umad_reg_agent2: Max Agents (%u) reached\n",
+ dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM;
goto out;
@@ -840,7 +836,7 @@ found:
req.mgmt_class_version = ureq.mgmt_class_version;
if (ureq.oui & 0xff000000) {
dev_notice(&file->port->dev,
- "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
+ "%s failed: oui invalid 0x%08x\n", __func__,
ureq.oui);
ret = -EINVAL;
goto out;
@@ -1145,7 +1141,7 @@ static const struct file_operations umad_sm_fops = {
static struct ib_umad_port *get_port(struct ib_device *ibdev,
struct ib_umad_device *umad_dev,
- unsigned int port)
+ u32 port)
{
if (!umad_dev)
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f5b8be3bedde..d5e15a8c870d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -364,7 +364,7 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
resp->max_srq_sge = attr->max_srq_sge;
resp->max_pkeys = attr->max_pkeys;
resp->local_ca_ack_delay = attr->local_ca_ack_delay;
- resp->phys_port_cnt = ib_dev->phys_port_cnt;
+ resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
}
static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
@@ -2002,12 +2002,13 @@ static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
- if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
- sizeof (struct ib_sge))
+ if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
+ sizeof(struct ib_sge))
return NULL;
- return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
- num_sge * sizeof (struct ib_sge), GFP_KERNEL);
+ return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
+ num_sge * sizeof(struct ib_sge),
+ GFP_KERNEL);
}
static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
@@ -2216,7 +2217,7 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
const struct ib_sge __user *sgls;
const void __user *wqes;
- if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
+ if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
@@ -2249,14 +2250,14 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
}
if (user_wr->num_sge >=
- (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
- sizeof (struct ib_sge)) {
+ (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
+ sizeof(struct ib_sge)) {
ret = -EINVAL;
goto err;
}
- next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
- user_wr->num_sge * sizeof (struct ib_sge),
+ next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
+ user_wr->num_sge * sizeof(struct ib_sge),
GFP_KERNEL);
if (!next) {
ret = -ENOMEM;
@@ -2274,8 +2275,8 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
next->num_sge = user_wr->num_sge;
if (next->num_sge) {
- next->sg_list = (void *) next +
- ALIGN(sizeof *next, sizeof (struct ib_sge));
+ next->sg_list = (void *)next +
+ ALIGN(sizeof(*next), sizeof(struct ib_sge));
if (copy_from_user(next->sg_list, sgls + sg_ind,
next->num_sge *
sizeof(struct ib_sge))) {
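Reviewer note (not part of the patch): besides the sizeof() style cleanup, the guard in alloc_wr() deserves a remark: it rejects any num_sge for which the final allocation size would wrap a u32, before the multiplication is ever evaluated. The same check as a standalone sketch (the helper name is illustrative):

	#include <linux/kernel.h>	/* ALIGN(), U32_MAX */
	#include <rdma/ib_verbs.h>	/* struct ib_sge */

	/* True iff hdr + num_sge * sizeof(struct ib_sge) would exceed U32_MAX. */
	static bool wr_size_would_overflow(size_t wr_size, u32 num_sge)
	{
		size_t hdr = ALIGN(wr_size, sizeof(struct ib_sge));

		return num_sge >= (U32_MAX - hdr) / sizeof(struct ib_sge);
	}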
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index ff047eb024ab..990f0724acc6 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -752,9 +752,10 @@ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
return uverbs_set_output(bundle, attr);
}
-int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx, s64 lower_bound, u64 upper_bound,
- s64 *def_val)
+int _uverbs_get_const_signed(s64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
{
const struct uverbs_attr *attr;
@@ -773,7 +774,30 @@ int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
return 0;
}
-EXPORT_SYMBOL(_uverbs_get_const);
+EXPORT_SYMBOL(_uverbs_get_const_signed);
+
+int _uverbs_get_const_unsigned(u64 *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 upper_bound, u64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to > upper_bound)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const_unsigned);
int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
size_t idx, const void *from, size_t size)
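Reviewer note (not part of the patch): the _signed/_unsigned split exists because an s64-only path cannot represent the upper half of the u64 range. An illustrative failure mode the new unsigned helper avoids:

	u64 big = (u64)S64_MAX + 1;	/* valid unsigned attribute value */
	s64 as_signed = (s64)big;	/* reinterpreted as negative, so a
					 * lower_bound of 0 in the signed
					 * helper would wrongly reject it;
					 * the unsigned variant checks only
					 * upper_bound. */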
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 28464c58738c..2b0798151fb7 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -96,10 +96,10 @@ static const char * const wc_statuses[] = {
[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
[IB_WC_LOC_PROT_ERR] = "local protection error",
[IB_WC_WR_FLUSH_ERR] = "WR flushed",
- [IB_WC_MW_BIND_ERR] = "memory management operation error",
+ [IB_WC_MW_BIND_ERR] = "memory bind operation error",
[IB_WC_BAD_RESP_ERR] = "bad response error",
[IB_WC_LOC_ACCESS_ERR] = "local access error",
- [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
+ [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
[IB_WC_REM_ACCESS_ERR] = "remote access error",
[IB_WC_REM_OP_ERR] = "remote operation error",
[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
@@ -227,7 +227,8 @@ rdma_node_get_transport(unsigned int node_type)
}
EXPORT_SYMBOL(rdma_node_get_transport);
-enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+ u32 port_num)
{
enum rdma_transport_type lt;
if (device->ops.get_link_layer)
@@ -341,7 +342,8 @@ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
}
/* uverbs manipulates usecnt with proper locking, while the kabi
- requires the caller to guarantee we can't race here. */
+ * requires the caller to guarantee we can't race here.
+ */
WARN_ON(atomic_read(&pd->usecnt));
ret = pd->device->ops.dealloc_pd(pd, udata);
@@ -658,7 +660,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
EXPORT_SYMBOL(ib_get_rdma_header_version);
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
const struct ib_grh *grh)
{
int grh_version;
@@ -701,7 +703,7 @@ static bool find_gid_index(const union ib_gid *gid,
}
static const struct ib_gid_attr *
-get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
+get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
u16 vlan_id, const union ib_gid *sgid,
enum ib_gid_type gid_type)
{
@@ -788,7 +790,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
* On success the caller is responsible to call rdma_destroy_ah_attr on the
* attr.
*/
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
{
@@ -919,7 +921,7 @@ void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
EXPORT_SYMBOL(rdma_destroy_ah_attr);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
- const struct ib_grh *grh, u8 port_num)
+ const struct ib_grh *grh, u32 port_num)
{
struct rdma_ah_attr ah_attr;
struct ib_ah *ah;
@@ -1037,8 +1039,12 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
}
atomic_inc(&pd->usecnt);
+ rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
+ rdma_restrack_parent_name(&srq->res, &pd->res);
+
ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
+ rdma_restrack_put(&srq->res);
atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
@@ -1048,6 +1054,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
return ERR_PTR(ret);
}
+ rdma_restrack_add(&srq->res);
+
return srq;
}
EXPORT_SYMBOL(ib_create_srq_user);
@@ -1086,6 +1094,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type))
atomic_dec(&srq->ext.cq->usecnt);
+ rdma_restrack_del(&srq->res);
kfree(srq);
return ret;
@@ -1673,7 +1682,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
const struct ib_gid_attr *old_sgid_attr_av;
const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
@@ -1801,7 +1810,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width)
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
{
int rc;
u32 netdev_speed;
@@ -2467,7 +2476,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
}
EXPORT_SYMBOL(ib_check_mr_status);
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
int state)
{
if (!device->ops.set_vf_link_state)
@@ -2477,7 +2486,7 @@ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
}
EXPORT_SYMBOL(ib_set_vf_link_state);
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info)
{
if (!device->ops.get_vf_config)
@@ -2487,7 +2496,7 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
}
EXPORT_SYMBOL(ib_get_vf_config);
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats)
{
if (!device->ops.get_vf_stats)
@@ -2497,7 +2506,7 @@ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
}
EXPORT_SYMBOL(ib_get_vf_stats);
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
int type)
{
if (!device->ops.set_vf_guid)
@@ -2507,7 +2516,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
}
EXPORT_SYMBOL(ib_set_vf_guid);
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid)
{
@@ -2849,7 +2858,7 @@ void ib_drain_qp(struct ib_qp *qp)
}
EXPORT_SYMBOL(ib_drain_qp);
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *))
@@ -2875,7 +2884,7 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(rdma_alloc_netdev);
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
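Reviewer note (not part of the patch): the u8 -> u32 port_num widening in this file is part of a tree-wide conversion; the min_t() clamp added to copy_query_dev_fields() earlier exists because the uverbs ABI still carries phys_port_cnt as a single byte. Illustrative arithmetic:

	u32 phys_port_cnt = 300;				/* now representable in-kernel */
	u8 truncated = phys_port_cnt;				/* silently becomes 44 (300 % 256) */
	u8 clamped = min_t(u32, phys_port_cnt, U8_MAX);		/* reports 255 instead */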
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index 0feac5132ce1..6a17f5cdb020 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -2,9 +2,7 @@
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
+ depends on INET && DCB && BNXT
help
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b930ea3dab7a..ba26d8e6a9c2 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -138,6 +138,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
+#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 5f5408cdf008..3e54e1ae75b4 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -114,7 +114,7 @@ static const char * const bnxt_re_stat_name[] = {
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
@@ -235,7 +235,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
}
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
/* We support only per port stats */
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 76399f477e5c..ede048607d6c 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -97,8 +97,8 @@ enum bnxt_re_hw_stats {
};
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num);
+ u32 port_num);
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index);
+ u32 port, int index);
#endif /* __BNXT_RE_HW_STATS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ba515efd4fdc..2efaa80bfbd2 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -189,7 +189,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
}
/* Port */
-int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -229,7 +229,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
return 0;
}
-int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr port_attr;
@@ -254,7 +254,7 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}
-int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -266,7 +266,7 @@ int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
&rdev->qplib_res.pkey_tbl, index, pkey);
}
-int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
int index, union ib_gid *gid)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
@@ -374,7 +374,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
}
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 9a8130b79256..d68671cc6173 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -149,19 +149,19 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
-int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr);
-int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
-int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
-int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
- u8 port_num);
+ u32 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index fdb8c2478258..8bfbf0231a9e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -81,6 +81,7 @@ static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
+static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
@@ -221,6 +222,37 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
+ struct bnxt_re_dev *rdev = p;
+ struct bnxt *bp;
+
+ if (!rdev)
+ return;
+ ASSERT_RTNL();
+
+ /* The L2 driver invokes this callback during device error/crash or
+ * device reset. The RoCE driver does not currently recover the device
+ * on error. Handle the error by dispatching fatal events to all QPs,
+ * i.e. by calling bnxt_re_dev_stop, and release the MSIx vectors,
+ * since the L2 driver wants to modify the MSIx table.
+ */
+ bp = netdev_priv(rdev->netdev);
+
+ ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver\n");
+ /* Check the current device state from the L2 structure and move the
+ * device to the detached state if FW_FATAL_COND is set.
+ * This prevents sending further commands to the HW during clean-up,
+ * in case the device is already in error.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+
+ bnxt_re_dev_stop(rdev);
+ bnxt_re_stop_irq(rdev);
+ /* Move the device state to detached and avoid sending any more
+ * commands to HW
+ */
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
}
static void bnxt_re_start(void *p)
@@ -234,6 +266,8 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
if (!rdev)
return;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return;
rdev->num_vfs = num_vfs;
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
@@ -427,6 +461,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
if (!en_dev)
return rc;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
@@ -489,6 +526,9 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
if (!en_dev)
return rc;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
@@ -561,24 +601,12 @@ static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
-static void bnxt_re_dev_unprobe(struct net_device *netdev,
- struct bnxt_en_dev *en_dev)
-{
- dev_put(netdev);
- module_put(en_dev->pdev->driver->driver.owner);
-}
-
static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
- struct bnxt *bp = netdev_priv(netdev);
struct bnxt_en_dev *en_dev;
struct pci_dev *pdev;
- /* Call bnxt_en's RoCE probe via indirect API */
- if (!bp->ulp_probe)
- return ERR_PTR(-EINVAL);
-
- en_dev = bp->ulp_probe(netdev);
+ en_dev = bnxt_ulp_probe(netdev);
if (IS_ERR(en_dev))
return en_dev;
@@ -593,10 +621,6 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return ERR_PTR(-ENODEV);
}
- /* Bump net device reference count */
- if (!try_module_get(pdev->driver->driver.owner))
- return ERR_PTR(-ENODEV);
-
dev_hold(netdev);
return en_dev;
@@ -1523,13 +1547,12 @@ fail:
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
- struct bnxt_en_dev *en_dev = rdev->en_dev;
struct net_device *netdev = rdev->netdev;
bnxt_re_dev_remove(rdev);
if (netdev)
- bnxt_re_dev_unprobe(netdev, en_dev);
+ dev_put(netdev);
}
static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
@@ -1551,7 +1574,7 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
*rdev = bnxt_re_dev_add(netdev, en_dev);
if (!*rdev) {
rc = -ENOMEM;
- bnxt_re_dev_unprobe(netdev, en_dev);
+ dev_put(netdev);
goto exit;
}
exit:
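Reviewer note (not part of the patch): the ERR_DEVICE_DETACHED handling above follows a common gating pattern: one atomic bit set on the fatal path, then tested at the top of every firmware entry point so later calls become cheap no-ops instead of timeouts. A minimal sketch using the flag names from this patch:

	/* Fatal path (under RTNL, see bnxt_re_stop() above): */
	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);

	/* Every later firmware request then bails out early: */
	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;	/* device detached, nothing to send */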
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 995d4633b0a1..d4d4959c2434 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2784,6 +2784,7 @@ do_rq:
dev_err(&cq->hwq.pdev->dev,
"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
cqe_cons, rq->max_wqe);
+ rc = -EINVAL;
goto done;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 441eb421e5e5..5d384def5e5f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -212,6 +212,10 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
u8 opcode, retry_cnt = 0xFF;
int rc = 0;
+ /* Prevent posting if f/w is not in a state to process */
+ if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
+ return 0;
+
do {
opcode = req->opcode;
rc = __send_message(rcfw, req, resp, sb, is_block);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 5f2f0a5a3560..9474c0046582 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -138,6 +138,8 @@ struct bnxt_qplib_qp_node {
#define FIRMWARE_INITIALIZED_FLAG (0)
#define FIRMWARE_FIRST_FLAG (31)
#define FIRMWARE_TIMED_OUT (3)
+#define ERR_DEVICE_DETACHED (4)
+
struct bnxt_qplib_cmdq_mbox {
struct bnxt_qplib_reg_desc reg;
void __iomem *prod;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index fa7878336100..3ca47004b752 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
unmap_io:
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
+ dpit->dbr_bar_reg_iomem = NULL;
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e42c812e74c3..291471d12197 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -145,7 +145,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
static LIST_HEAD(timeout_list);
-static spinlock_t timeout_lock;
+static DEFINE_SPINLOCK(timeout_lock);
static void deref_cm_id(struct c4iw_ep_common *epc)
{
@@ -4452,7 +4452,6 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
int __init c4iw_cm_init(void)
{
- spin_lock_init(&timeout_lock);
skb_queue_head_init(&rxq);
workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
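Reviewer note (not part of the patch): DEFINE_SPINLOCK() initializes the lock at compile time, which is why the spin_lock_init() call can simply be dropped from c4iw_cm_init(): the lock is valid from the first reference, with no dependence on init ordering. A minimal equivalent:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(timeout_lock);	/* ready before any code runs */

	static void touch_timeout_list(void)
	{
		spin_lock(&timeout_lock);
		/* ... critical section ... */
		spin_unlock(&timeout_lock);
	}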
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f85477f3b037..cdec5deb37a1 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -341,11 +341,6 @@ static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
return container_of(ibdev, struct c4iw_dev, ibdev);
}
-static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
-{
- return container_of(rdev, struct c4iw_dev, rdev);
-}
-
static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
return xa_load(&rhp->cqs, cqid);
@@ -659,12 +654,6 @@ static inline u32 c4iw_ib_to_tpt_access(int a)
FW_RI_MEM_ACCESS_LOCAL_READ;
}
-static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
-}
-
enum c4iw_mmid_state {
C4IW_STAG_STATE_VALID,
C4IW_STAG_STATE_INVALID
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 1f1f856f8715..3f1893e180dd 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -237,12 +237,12 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
return 0;
}
-static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct c4iw_dev *dev;
- pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+ pr_debug("ibdev %p, port %u, index %d, gid %p\n",
ibdev, port, index, gid);
if (!port)
return -EINVAL;
@@ -295,7 +295,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
return 0;
}
-static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+static int c4iw_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
int ret = 0;
@@ -378,7 +378,7 @@ static const char * const names[] = {
};
static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
@@ -391,7 +391,7 @@ static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
static int c4iw_get_mib(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
@@ -420,7 +420,7 @@ static const struct attribute_group c4iw_attr_group = {
.attrs = c4iw_class_attributes,
};
-static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 5c95c789f302..e800e8e8bed5 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
goto out;
entry->qid = qid;
list_add_tail(&entry->entry, &uctx->cqids);
- for (i = qid; i & rdev->qpmask; i++) {
+ for (i = qid + 1; i & rdev->qpmask; i++) {
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
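Reviewer note (not part of the patch): the qid + 1 change in c4iw_get_qpid() is easiest to see with concrete numbers (example values are mine). With qpmask = 3, i.e. four qids per block:

	/* block-aligned qid = 8:
	 *   old: for (i = 8; i & 3; i++) -> 8 & 3 == 0, zero iterations,
	 *        qids 9..11 never reach the uctx free list
	 *   new: for (i = 9; i & 3; i++) -> queues 9, 10, 11; stops at 12
	 * unaligned qid = 9:
	 *   old: the first iteration re-queues qid 9 itself (duplicate)
	 *   new: queues only the remaining qids 10 and 11
	 */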
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index b170817b2741..c3b0e2896475 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -487,11 +487,6 @@ static inline int t4_rq_empty(struct t4_wq *wq)
return wq->rq.in_use == 0;
}
-static inline int t4_rq_full(struct t4_wq *wq)
-{
- return wq->rq.in_use == (wq->rq.size - 1);
-}
-
static inline u32 t4_rq_avail(struct t4_wq *wq)
{
return wq->rq.size - 1 - wq->rq.in_use;
@@ -534,11 +529,6 @@ static inline int t4_sq_empty(struct t4_wq *wq)
return wq->sq.in_use == 0;
}
-static inline int t4_sq_full(struct t4_wq *wq)
-{
- return wq->sq.in_use == (wq->sq.size - 1);
-}
-
static inline u32 t4_sq_avail(struct t4_wq *wq)
{
return wq->sq.size - 1 - wq->sq.in_use;
@@ -679,11 +669,6 @@ static inline void t4_enable_wq_db(struct t4_wq *wq)
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
-static inline int t4_wq_db_enabled(struct t4_wq *wq)
-{
- return !wq->rq.queue[wq->rq.size].status.db_off;
-}
-
enum t4_cq_flags {
CQ_ARMED = 1,
};
@@ -817,19 +802,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
-{
- if (cq->sw_in_use == cq->size) {
- pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
- __func__, cq->cqid);
- cq->error = 1;
- return NULL;
- }
- if (cq->sw_in_use)
- return &cq->sw_queue[cq->sw_cidx];
- return NULL;
-}
-
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret = 0;
@@ -843,11 +815,6 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline int t4_cq_in_error(struct t4_cq *cq)
-{
- return *cq->qp_errp;
-}
-
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
*cq->qp_errp = 1;
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index e5d9712e98c4..ea322cec27d2 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -120,14 +120,14 @@ struct efa_ah {
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata);
-int efa_query_port(struct ib_device *ibdev, u8 port,
+int efa_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
-int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey);
int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
@@ -142,7 +142,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
@@ -156,9 +156,9 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags);
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
- u8 port_num);
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+ u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num);
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port_num, int index);
+ u32 port_num, int index);
#endif /* _EFA_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 0f578734bddb..816cfd65b7ac 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -209,11 +209,11 @@ static void efa_set_host_info(struct efa_dev *dev)
if (!hinf)
return;
- strlcpy(hinf->os_dist_str, utsname()->release,
- min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
+ strscpy(hinf->os_dist_str, utsname()->release,
+ sizeof(hinf->os_dist_str));
hinf->os_type = EFA_ADMIN_OS_LINUX;
- strlcpy(hinf->kernel_ver_str, utsname()->version,
- min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
+ strscpy(hinf->kernel_ver_str, utsname()->version,
+ sizeof(hinf->kernel_ver_str));
hinf->kernel_ver = LINUX_VERSION_CODE;
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
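Reviewer note (not part of the patch): the strscpy() conversion is more than churn. strlcpy() returns the full source length, so it must walk the entire source string, and the old code needed a min() over both buffer sizes; strscpy() is bounded by the destination alone, always NUL-terminates, and signals truncation. A minimal sketch with an arbitrary 16-byte destination:

	#include <linux/string.h>
	#include <linux/utsname.h>

	char dst[16];
	ssize_t n;

	/* Copies at most 15 chars + NUL; returns -E2BIG on truncation. */
	n = strscpy(dst, utsname()->release, sizeof(dst));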
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 479b604e533a..51572f1dc611 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -247,7 +247,7 @@ int efa_query_device(struct ib_device *ibdev,
return 0;
}
-int efa_query_port(struct ib_device *ibdev, u8 port,
+int efa_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct efa_dev *dev = to_edev(ibdev);
@@ -319,7 +319,7 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return 0;
}
-int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct efa_dev *dev = to_edev(ibdev);
@@ -329,7 +329,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
if (index > 0)
@@ -1619,7 +1619,7 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -1904,7 +1904,7 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
return 0;
}
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num)
{
return rdma_alloc_hw_stats_struct(efa_stats_names,
ARRAY_SIZE(efa_stats_names),
@@ -1912,7 +1912,7 @@ struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
}
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
@@ -1981,7 +1981,7 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
}
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_UNSPECIFIED;
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 04b1e8f021f6..16543f717527 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -962,7 +962,6 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_msix_entry *msix)
{
struct cpu_mask_set *set = NULL;
- struct hfi1_ctxtdata *rcd;
struct hfi1_affinity_node *entry;
mutex_lock(&node_affinity.lock);
@@ -976,14 +975,15 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
case IRQ_GENERAL:
/* Don't do accounting for general contexts */
break;
- case IRQ_RCVCTXT:
- rcd = (struct hfi1_ctxtdata *)msix->arg;
+ case IRQ_RCVCTXT: {
+ struct hfi1_ctxtdata *rcd = msix->arg;
+
/* Don't do accounting for control contexts */
if (rcd->ctxt != HFI1_CTRL_CTXT)
set = &entry->rcv_intr;
break;
+ }
case IRQ_NETDEVCTXT:
- rcd = (struct hfi1_ctxtdata *)msix->arg;
set = &entry->def_intr;
break;
default:
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 993cbf37e0b9..5eeae8df415b 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1322,7 +1322,7 @@ CNTR_ELEM(#name, \
access_ibp_##cntr)
/**
- * hfi_addr_from_offset - return addr for readq/writeq
+ * hfi1_addr_from_offset - return addr for readq/writeq
* @dd: the dd device
* @offset: the offset of the CSR within bar0
*
@@ -8316,7 +8316,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
}
/**
- * gerneral_interrupt() - General interrupt handler
+ * general_interrupt - General interrupt handler
* @irq: MSIx IRQ vector
* @data: hfi1 devdata
*
@@ -15243,8 +15243,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
- /* alloc netdev data */
- ret = hfi1_netdev_alloc(dd);
+ /* alloc VNIC/AIP rx data */
+ ret = hfi1_alloc_rx(dd);
if (ret)
goto bail_cleanup;
@@ -15348,7 +15348,7 @@ bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
msix_clean_up_interrupts(dd);
bail_cleanup:
- hfi1_netdev_free(dd);
+ hfi1_free_rx(dd);
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 2c6f2de74d4d..ac26649d4463 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -822,11 +822,6 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
#define LCB_START DC_LCB_CSRS
#define LCB_END DC_8051_CSRS /* next block is 8051 */
-static inline int is_lcb_offset(u32 offset)
-{
- return (offset >= LCB_START && offset < LCB_END);
-}
-
extern uint num_vls;
extern uint disable_integrity;
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 0b64aa87ab73..f88bb4af245f 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1026,7 +1026,7 @@ static bool __set_armed_to_active(struct hfi1_packet *packet)
}
/**
- * armed to active - the fast path for armed to active
+ * set_armed_to_active - the fast path for armed to active
* @packet: the packet structure
*
* Return true if packet processing needs to bail.
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index 91f13140ddf2..a414214f6035 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -49,7 +49,7 @@
#include "trace.h"
/**
- * exp_tid_group_init - initialize exp_tid_set
+ * hfi1_exp_tid_set_init - initialize exp_tid_set
* @set: the set
*/
static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
@@ -70,7 +70,7 @@ void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
}
/**
- * alloc_ctxt_rcv_groups - initialize expected receive groups
+ * hfi1_alloc_ctxt_rcv_groups - initialize expected receive groups
* @rcd: the context to add the groupings to
*/
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
@@ -100,7 +100,7 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
}
/**
- * free_ctxt_rcv_groups - free expected receive groups
+ * hfi1_free_ctxt_rcv_groups - free expected receive groups
* @rcd: the context to free
*
* The routine dismantles the expect receive linked
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 0e83d4b61e46..2cf102b5abd4 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
__func__, (ptr -
(u32 *)dd->platform_config.data));
+ ret = -EINVAL;
goto bail;
}
/* Jump the CRC DWORD */
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 2a9a040569eb..867ae0b1aa95 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -69,7 +69,6 @@
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
-#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
@@ -717,12 +716,6 @@ static inline void incr_cntr64(u64 *cntr)
(*cntr)++;
}
-static inline void incr_cntr32(u32 *cntr)
-{
- if (*cntr < (u32)-1LL)
- (*cntr)++;
-}
-
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
enum irq_type type;
@@ -864,7 +857,7 @@ struct hfi1_pportdata {
u8 rx_pol_inv;
u8 hw_pidx; /* physical port index */
- u8 port; /* IB port number and index into dd->pports - 1 */
+ u32 port; /* IB port number and index into dd->pports - 1 */
/* type of neighbor node */
u8 neighbor_type;
u8 neighbor_normal;
@@ -1066,6 +1059,7 @@ struct sdma_vl_map;
#define SERIAL_MAX 16 /* length of the serial number */
typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
+struct hfi1_netdev_rx;
struct hfi1_devdata {
struct hfi1_ibdev verbs_dev; /* must be first */
/* pointers to related structs for this device */
@@ -1408,7 +1402,7 @@ struct hfi1_devdata {
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
int vnic_num_vports;
- struct net_device *dummy_netdev;
+ struct hfi1_netdev_rx *netdev_rx;
struct hfi1_affinity_node *affinity_entry;
/* Keeps track of IPoIB RSM rule users */
@@ -1480,7 +1474,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
- struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
+ struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
@@ -1976,10 +1970,10 @@ static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
return container_of(rdi, struct hfi1_ibdev, rdi);
}
-static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
+static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+ u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */
WARN_ON(pidx >= dd->num_pports);
return &dd->pport[pidx].ibport_data;
@@ -2198,7 +2192,7 @@ extern const struct attribute_group ib_hfi1_attr_group;
int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);
-int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 786c6316273f..e3a8a420c045 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -627,7 +627,7 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
* Common code for initializing the physical port structure.
*/
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
- struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
+ struct hfi1_devdata *dd, u8 hw_pidx, u32 port)
{
int i;
uint default_pkey_idx;
@@ -1775,7 +1775,7 @@ static void remove_one(struct pci_dev *pdev)
hfi1_unregister_ib_device(dd);
/* free netdev data */
- hfi1_netdev_free(dd);
+ hfi1_free_rx(dd);
/*
* Disable the IB link, disable interrupts on the device,
@@ -1860,7 +1860,8 @@ bail:
}
/**
- * allocate eager buffers, both kernel and user contexts.
+ * hfi1_setup_eagerbufs - llocate eager buffers, both kernel and user
+ * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
* @rcd: the context we are setting up.
*
* Allocate the eager TID buffers and program them into hip.
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index d580aa17ae37..cda81a7843c2 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -321,7 +321,7 @@ static inline void iowait_drain_wakeup(struct iowait *wait)
/**
* iowait_get_txhead() - get packet off of iowait list
*
- * @wait iowait_work struture
+ * @wait: iowait_work structure
*/
static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index f650cac9d424..2cff38b105ac 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
* @producer_lock: producer sync lock
* @consumer_lock: consumer sync lock
*/
+struct ipoib_txreq;
struct hfi1_ipoib_circ_buf {
- void **items;
+ struct ipoib_txreq **items;
unsigned long head;
unsigned long tail;
unsigned long max_items;
@@ -125,10 +126,10 @@ hfi1_ipoib_priv(const struct net_device *dev)
return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}
-int hfi1_ipoib_send_dma(struct net_device *dev,
- struct sk_buff *skb,
- struct ib_ah *address,
- u32 dqpn);
+int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn);
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
@@ -143,8 +144,10 @@ struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
int size, void *data);
int hfi1_ipoib_rn_get_params(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);
+
#endif /* _IPOIB_H */
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 3242290eb6a7..e594a961f513 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -101,14 +101,6 @@ static const struct net_device_ops hfi1_ipoib_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
};
-static int hfi1_ipoib_send(struct net_device *dev,
- struct sk_buff *skb,
- struct ib_ah *address,
- u32 dqpn)
-{
- return hfi1_ipoib_send_dma(dev, skb, address, dqpn);
-}
-
static int hfi1_ipoib_mcast_attach(struct net_device *dev,
struct ib_device *device,
union ib_gid *mgid,
@@ -194,7 +186,7 @@ static void hfi1_ipoib_set_id(struct net_device *dev, int id)
}
static int hfi1_ipoib_setup_rn(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
struct net_device *netdev,
void *param)
{
@@ -204,6 +196,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
int rc;
rn->send = hfi1_ipoib_send;
+ rn->tx_timeout = hfi1_ipoib_tx_timeout;
rn->attach_mcast = hfi1_ipoib_mcast_attach;
rn->detach_mcast = hfi1_ipoib_mcast_detach;
rn->set_id = hfi1_ipoib_set_id;
@@ -243,7 +236,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
}
int hfi1_ipoib_rn_get_params(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
{
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index edd4eeac8dd1..993f9838b6c8 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -15,6 +15,7 @@
#include "verbs.h"
#include "trace_ibhdrs.h"
#include "ipoib.h"
+#include "trace_tx.h"
/* Add a convenience helper */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
@@ -63,12 +64,14 @@ static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
+ trace_hfi1_txq_stop(txq);
if (atomic_inc_return(&txq->stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
+ trace_hfi1_txq_wake(txq);
if (atomic_dec_and_test(&txq->stops))
netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}
@@ -89,8 +92,10 @@ static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
++txq->sent_txreqs;
if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
- !atomic_xchg(&txq->ring_full, 1))
+ !atomic_xchg(&txq->ring_full, 1)) {
+ trace_hfi1_txq_full(txq);
hfi1_ipoib_stop_txq(txq);
+ }
}
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
@@ -112,8 +117,10 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* to protect against ring overflow.
*/
if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
- atomic_xchg(&txq->ring_full, 0))
+ atomic_xchg(&txq->ring_full, 0)) {
+ trace_hfi1_txq_xmit_unstopped(txq);
hfi1_ipoib_wake_txq(txq);
+ }
}
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -202,7 +209,7 @@ static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
/* Finish storing txreq before incrementing head. */
smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
- napi_schedule(tx->txq->napi);
+ napi_schedule_irqoff(tx->txq->napi);
} else {
struct hfi1_ipoib_txq *txq = tx->txq;
struct hfi1_ipoib_dev_priv *priv = tx->priv;
@@ -405,6 +412,7 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
sdma_select_engine_sc(priv->dd,
txp->flow.tx_queue,
txp->flow.sc5);
+ trace_hfi1_flow_switch(txp->txq);
}
return tx;
@@ -525,6 +533,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
if (txq->flow.as_int != txp->flow.as_int) {
int ret;
+ trace_hfi1_flow_flush(txq);
ret = hfi1_ipoib_flush_tx_list(dev, txq);
if (unlikely(ret)) {
if (ret == -EBUSY)
@@ -572,10 +581,10 @@ static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
return (u8)skb_get_queue_mapping(skb);
}
-int hfi1_ipoib_send_dma(struct net_device *dev,
- struct sk_buff *skb,
- struct ib_ah *address,
- u32 dqpn)
+int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
struct ipoib_txparms txp;
@@ -635,8 +644,10 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
/* came from non-list submit */
list_add_tail(&txreq->list, &txq->tx_list);
if (list_empty(&txq->wait.list)) {
- if (!atomic_xchg(&txq->no_desc, 1))
+ if (!atomic_xchg(&txq->no_desc, 1)) {
+ trace_hfi1_txq_queued(txq);
hfi1_ipoib_stop_txq(txq);
+ }
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
}
@@ -659,6 +670,7 @@ static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
struct hfi1_ipoib_txq *txq =
container_of(wait, struct hfi1_ipoib_txq, wait);
+ trace_hfi1_txq_wakeup(txq);
if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
}
@@ -702,14 +714,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->tx_napis = kcalloc_node(dev->num_tx_queues,
sizeof(struct napi_struct),
- GFP_ATOMIC,
+ GFP_KERNEL,
priv->dd->node);
if (!priv->tx_napis)
goto free_txreq_cache;
priv->txqs = kcalloc_node(dev->num_tx_queues,
sizeof(struct hfi1_ipoib_txq),
- GFP_ATOMIC,
+ GFP_KERNEL,
priv->dd->node);
if (!priv->txqs)
goto free_tx_napis;
@@ -741,9 +753,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- vzalloc_node(array_size(tx_ring_size,
- sizeof(struct ipoib_txreq)),
- priv->dd->node);
+ kcalloc_node(tx_ring_size,
+ sizeof(struct ipoib_txreq *),
+ GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
@@ -764,7 +776,7 @@ free_txqs:
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(txq->napi);
- vfree(txq->tx_ring.items);
+ kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
@@ -817,7 +829,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(txq->napi);
(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
- vfree(txq->tx_ring.items);
+ kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
@@ -854,3 +866,32 @@ void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
}
}
+
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = &priv->txqs[q];
+ u64 completed = atomic64_read(&txq->complete_txreqs);
+
+ dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
+ (unsigned long long)txq, q,
+ __netif_subqueue_stopped(dev, txq->q_idx),
+ atomic_read(&txq->stops),
+ atomic_read(&txq->no_desc),
+ atomic_read(&txq->ring_full));
+ dd_dev_info(priv->dd, "sde %llx engine %u\n",
+ (unsigned long long)txq->sde,
+ txq->sde ? txq->sde->this_idx : 0);
+ dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
+ dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
+ txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
+ dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
+ dev->tx_queue_len, txq->tx_ring.max_items);
+ dd_dev_info(priv->dd, "head %lu tail %lu\n",
+ txq->tx_ring.head, txq->tx_ring.tail);
+ dd_dev_info(priv->dd, "wait queued %u\n",
+ !list_empty(&txq->wait.list));
+ dd_dev_info(priv->dd, "tx_list empty %u\n",
+ list_empty(&txq->tx_list));
+}
+
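Reviewer note (not part of the patch): with the ring now holding struct ipoib_txreq pointers, each slot shrinks from a full struct to one pointer, which is presumably why plain kcalloc_node() now suffices where vzalloc_node() was needed. Indexing still relies on the CIRC_ADD() mask trick defined at the top of ipoib_tx.c, which only wraps correctly for power-of-two ring sizes:

	#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))

	/* size = 8:  CIRC_ADD(7, 1, 8) == (7 + 1) & 7 == 0  -> wraps to slot 0
	 *            CIRC_ADD(5, 4, 8) == (5 + 4) & 7 == 1  -> 4 past 5 is 1 */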
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index e2f2f7847aed..1fe5e702f31d 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -108,7 +108,7 @@ static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
return 0;
}
-void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port)
{
struct ib_event event;
@@ -297,7 +297,7 @@ static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
struct rvt_qp *qp0;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = dd_from_ppd(ppd);
- u8 port_num = ppd->port;
+ u32 port_num = ppd->port;
memset(&attr, 0, sizeof(attr));
attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
@@ -515,7 +515,7 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
{
struct trap_node *trap;
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
@@ -581,7 +581,7 @@ void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len, u32 max_len)
+ u32 port, u32 *resp_len, u32 max_len)
{
struct opa_node_description *nd;
@@ -601,12 +601,12 @@ static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
}
static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct opa_node_info *ni;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+ u32 pidx = port - 1; /* IB number port from 1, hw from 0 */
ni = (struct opa_node_info *)data;
@@ -641,11 +641,11 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
}
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
+ u32 port)
{
struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+ u32 pidx = port - 1; /* IB number port from 1, hw from 0 */
/* GUID 0 is illegal */
if (smp->attr_mod || pidx >= dd->num_pports ||
@@ -794,7 +794,7 @@ void read_ltp_rtt(struct hfi1_devdata *dd)
}
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
int i;
@@ -1009,7 +1009,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
* @port: the IB port number
* @pkeys: the pkey table is placed here
*/
-static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
{
struct hfi1_pportdata *ppd = dd->pport + port - 1;
@@ -1019,7 +1019,7 @@ static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
}
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1349,7 +1349,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
*
*/
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len, int local_mad)
{
struct opa_port_info *pi = (struct opa_port_info *)data;
@@ -1667,7 +1667,7 @@ get_only:
* @port: the IB port number
* @pkeys: the PKEY table
*/
-static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
{
struct hfi1_pportdata *ppd;
int i;
@@ -1718,7 +1718,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
}
static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1732,7 +1732,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
u32 size = 0;
if (n_blocks_sent == 0) {
- pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
+ pr_warn("OPA Get PKey AM Invalid : P = %u; B = 0x%x; N = 0x%x\n",
port, start_block, n_blocks_sent);
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
@@ -1825,7 +1825,7 @@ static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
}
static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1848,7 +1848,7 @@ static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1877,7 +1877,7 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1900,7 +1900,7 @@ static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1921,7 +1921,7 @@ static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NBLK(am);
@@ -1943,7 +1943,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NBLK(am);
@@ -1985,7 +1985,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NPORT(am);
@@ -2010,7 +2010,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NPORT(am);
@@ -2042,7 +2042,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
u32 nports = OPA_AM_NPORT(am);
@@ -2084,7 +2084,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len, int local_mad)
{
u32 nports = OPA_AM_NPORT(am);
@@ -2132,7 +2132,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -2184,7 +2184,7 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len,
+ struct ib_device *ibdev, u32 port, u32 *resp_len,
u32 max_len)
{
u32 num_ports = OPA_AM_NPORT(am);
@@ -2208,7 +2208,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len,
+ struct ib_device *ibdev, u32 port, u32 *resp_len,
u32 max_len)
{
u32 num_ports = OPA_AM_NPORT(am);
@@ -2232,7 +2232,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
@@ -2274,7 +2274,7 @@ static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
@@ -2722,7 +2722,7 @@ u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct opa_port_status_req *req =
(struct opa_port_status_req *)pmp->data;
@@ -2732,7 +2732,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
unsigned long vl;
size_t response_data_size;
u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- u8 port_num = req->port_num;
+ u32 port_num = req->port_num;
u8 num_vls = hweight64(vl_select_mask);
struct _vls_pctrs *vlinfo;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -2888,7 +2888,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
-static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
+static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port,
u8 res_lli, u8 res_ler)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -2973,7 +2973,7 @@ static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct opa_port_data_counters_msg *req =
(struct opa_port_data_counters_msg *)pmp->data;
@@ -2987,7 +2987,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u8 lq, num_vls;
u8 res_lli, res_ler;
u64 port_mask;
- u8 port_num;
+ u32 port_num;
unsigned long vl;
unsigned long vl_select_mask;
int vfi;
@@ -3123,7 +3123,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
}
static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
+ struct ib_device *ibdev, u32 port)
{
struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
pmp->data;
@@ -3151,7 +3151,7 @@ bail:
}
static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
- struct _port_ectrs *rsp, u8 port)
+ struct _port_ectrs *rsp, u32 port)
{
u64 tmp, tmp2;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -3194,11 +3194,11 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ectrs *rsp;
- u8 port_num;
+ u32 port_num;
struct opa_port_error_counters64_msg *req;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 num_ports;
@@ -3283,7 +3283,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
}
static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
+ struct ib_device *ibdev, u32 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
pmp->data;
@@ -3369,7 +3369,7 @@ bail:
static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ei *rsp;
@@ -3377,7 +3377,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u64 port_mask;
u32 num_ports;
- u8 port_num;
+ u32 port_num;
u8 num_pslm;
u64 reg;
@@ -3468,7 +3468,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct opa_clear_port_status *req =
(struct opa_clear_port_status *)pmp->data;
@@ -3620,14 +3620,14 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u32 port, u32 *resp_len)
{
struct _port_ei *rsp;
struct opa_port_error_info_msg *req;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u64 port_mask;
u32 num_ports;
- u8 port_num;
+ u32 port_num;
u8 num_pslm;
u32 error_info_select;
@@ -3702,7 +3702,7 @@ struct opa_congestion_info_attr {
} __packed;
static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct opa_congestion_info_attr *p =
@@ -3727,7 +3727,7 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len, u32 max_len)
+ u32 port, u32 *resp_len, u32 max_len)
{
int i;
struct opa_congestion_setting_attr *p =
@@ -3819,7 +3819,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct opa_congestion_setting_attr *p =
@@ -3860,7 +3860,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len, u32 max_len)
+ u32 port, u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -3925,7 +3925,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
}
static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct ib_cc_table_attr *cc_table_attr =
@@ -3977,7 +3977,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
@@ -4036,7 +4036,7 @@ struct opa_led_info {
#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -4066,7 +4066,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -4089,7 +4089,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev, u8 port,
+ u8 *data, struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len)
{
int ret;
@@ -4179,7 +4179,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
}
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev, u8 port,
+ u8 *data, struct ib_device *ibdev, u32 port,
u32 *resp_len, u32 max_len, int local_mad)
{
int ret;
@@ -4254,7 +4254,7 @@ static inline void set_aggr_error(struct opa_aggregate *ag)
}
static int subn_get_opa_aggregate(struct opa_smp *smp,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len)
{
int i;
@@ -4303,7 +4303,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
}
static int subn_set_opa_aggregate(struct opa_smp *smp,
- struct ib_device *ibdev, u8 port,
+ struct ib_device *ibdev, u32 port,
u32 *resp_len, int local_mad)
{
int i;
@@ -4509,7 +4509,7 @@ static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
}
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct opa_mad *in_mad,
+ u32 port, const struct opa_mad *in_mad,
struct opa_mad *out_mad,
u32 *resp_len, int local_mad)
{
@@ -4614,7 +4614,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
}
static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
+ u32 port, const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_smp *smp = (struct ib_smp *)out_mad;
@@ -4672,7 +4672,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
return ret;
}
-static int process_perf(struct ib_device *ibdev, u8 port,
+static int process_perf(struct ib_device *ibdev, u32 port,
const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
@@ -4734,7 +4734,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
return ret;
}
-static int process_perf_opa(struct ib_device *ibdev, u8 port,
+static int process_perf_opa(struct ib_device *ibdev, u32 port,
const struct opa_mad *in_mad,
struct opa_mad *out_mad, u32 *resp_len)
{
@@ -4816,7 +4816,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
}
static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_wc *in_wc,
+ u32 port, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct opa_mad *in_mad,
struct opa_mad *out_mad, size_t *out_mad_size,
@@ -4869,7 +4869,7 @@ bail:
return ret;
}
-static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad *in_mad,
@@ -4914,7 +4914,7 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*
* This is called by the ib_mad module.
*/
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index)
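
The mad.c hunks above are a mechanical widening of the port parameter from u8 to u32 across the SMA/PMA handlers, matching the rdma core's move to u32 port numbers. The type change is about truncation safety rather than behavior: a u8 parameter silently reduces any port value modulo 256. A minimal userspace sketch, assuming nothing beyond standard C (the handler names are illustrative, not driver functions):

    /* Why the u8 -> u32 widening matters: a u8 parameter truncates. */
    #include <stdint.h>
    #include <stdio.h>

    static int handle_port_u8(uint8_t port)   { return (int)port; }
    static int handle_port_u32(uint32_t port) { return (int)port; }

    int main(void)
    {
        uint32_t port = 256;    /* hypothetical out-of-range value */

        /* The u8 variant sees 256 % 256 == 0; the u32 variant does not. */
        printf("u8 handler sees %d, u32 handler sees %d\n",
               handle_port_u8(port), handle_port_u32(port));
        return 0;
    }
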
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 889e63d3f2cc..0205d308ef5e 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -436,7 +436,7 @@ struct sc2vlnt {
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
-void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port);
void hfi1_handle_trap_timer(struct timer_list *t);
u16 tx_link_width(u16 link_width);
u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width,
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index f3fb28e3d5d7..d213f65d4cdd 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_handler *h;
int ret;
- h = kmalloc(sizeof(*h), GFP_KERNEL);
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
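
The one-line mmu_rb.c change swaps kmalloc() for kzalloc(), so every field of the freshly allocated handler starts at zero and members that are only written on some paths can never be read as heap garbage. A userspace analogue with calloc() standing in for kzalloc() (stand-in types, not the hfi1 handler):

    #include <stdio.h>
    #include <stdlib.h>

    struct handler {
        void *ops_arg;
        int   registered;   /* assumed to start at 0 by later code */
    };

    int main(void)
    {
        /* calloc (like kzalloc) returns zeroed memory; malloc (like
         * kmalloc) does not, so 'registered' could hold garbage. */
        struct handler *h = calloc(1, sizeof(*h));

        if (!h)
            return 1;
        printf("registered = %d\n", h->registered);  /* always 0 */
        free(h);
        return 0;
    }
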
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index cf3040bb177f..57a5f02ebc77 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -206,7 +206,7 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
- * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
* for netdev context
* @rcd: valid netdev context
*/
@@ -221,7 +221,7 @@ int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
- * msix_request_smda_ira() - Helper for getting SDMA IRQ resources
+ * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
* @sde: valid sdma engine
*
*/
@@ -243,7 +243,7 @@ int msix_request_sdma_irq(struct sdma_engine *sde)
}
/**
- * msix_request_general_irq(void) - Helper for getting general IRQ
+ * msix_request_general_irq - Helper for getting general IRQ
* resources
* @dd: valid device data
*/
@@ -269,7 +269,7 @@ int msix_request_general_irq(struct hfi1_devdata *dd)
}
/**
- * enable_sdma_src() - Helper to enable SDMA IRQ srcs
+ * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
* @dd: valid devdata structure
* @i: index of SDMA engine
*/
@@ -349,7 +349,7 @@ void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
}
/**
- * hfi1_clean_up_msix_interrupts() - Free all MSIx IRQ resources
+ * msix_clean_up_interrupts - Free all MSIx IRQ resources
* @dd: valid device data data structure
*
* Free the MSIx and associated PCI resources, if they have been allocated.
@@ -372,7 +372,7 @@ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
}
/**
- * msix_netdev_syncrhonize_irq() - netdev IRQ synchronize
+ * msix_netdev_synchronize_irq - netdev IRQ synchronize
* @dd: valid devdata
*/
void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
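
The msix.c hunks repair kernel-doc headers whose names had drifted from the functions they document (msix_request_smda_ira vs msix_request_sdma_irq, and so on); scripts/kernel-doc matches the name on the comment's first line against the definition that follows and warns on a mismatch. A correctly paired header looks like this (the Return: line is an assumed convention, not copied from the driver):

    struct sdma_engine;    /* opaque for this sketch */

    /**
     * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
     * @sde: valid sdma engine
     *
     * Return: 0 on success, negative errno on failure (assumed).
     */
    int msix_request_sdma_irq(struct sdma_engine *sde);
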
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
index 947543a3e0c4..8aa074670a9c 100644
--- a/drivers/infiniband/hw/hfi1/netdev.h
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -14,15 +14,14 @@
/**
* struct hfi1_netdev_rxq - Receive Queue for HFI
- * dummy netdev. Both IPoIB and VNIC netdevices will be working on
- * top of this device.
+ * Both IPoIB and VNIC netdevices work on top of the rx abstraction.
* @napi: napi object
- * @priv: ptr to netdev_priv
+ * @rx: ptr to netdev_rx
* @rcd: ptr to receive context data
*/
struct hfi1_netdev_rxq {
struct napi_struct napi;
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
struct hfi1_ctxtdata *rcd;
};
@@ -36,7 +35,8 @@ struct hfi1_netdev_rxq {
#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
/**
- * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
+ * struct hfi1_netdev_rx: data required to setup and run HFI netdev.
+ * @rx_napi: the dummy netdevice to support "polling" the receive contexts
* @dd: hfi1_devdata
* @rxq: pointer to dummy netdev receive queues.
* @num_rx_q: number of receive queues
@@ -48,7 +48,8 @@ struct hfi1_netdev_rxq {
* @netdevs: atomic counter of netdevs using dummy netdev.
* When 0 receive queues will be freed.
*/
-struct hfi1_netdev_priv {
+struct hfi1_netdev_rx {
+ struct net_device rx_napi;
struct hfi1_devdata *dd;
struct hfi1_netdev_rxq *rxq;
int num_rx_q;
@@ -61,41 +62,27 @@ struct hfi1_netdev_priv {
};
static inline
-struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
-{
- return (struct hfi1_netdev_priv *)&dev[1];
-}
-
-static inline
int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->num_rx_q;
+ return dd->netdev_rx->num_rx_q;
}
static inline
struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->rxq[ctxt].rcd;
+ return dd->netdev_rx->rxq[ctxt].rcd;
}
static inline
int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- return priv->rmt_start;
+ return dd->netdev_rx->rmt_start;
}
static inline
void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
- priv->rmt_start = rmt_idx;
+ dd->netdev_rx->rmt_start = rmt_idx;
}
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
@@ -105,8 +92,8 @@ void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
-int hfi1_netdev_alloc(struct hfi1_devdata *dd);
-void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_alloc_rx(struct hfi1_devdata *dd);
+void hfi1_free_rx(struct hfi1_devdata *dd);
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
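
The netdev.h rework drops the tail-allocation trick — a private area placed behind the dummy net_device and recovered via (struct hfi1_netdev_priv *)&dev[1] — and instead embeds the dummy device inside the new hfi1_netdev_rx hung off dd->netdev_rx, so every accessor collapses into a direct field dereference. When the owner must be recovered from the embedded member, container_of() is the usual idiom; a standalone sketch with stand-in types (not the hfi1 definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct net_device { int dummy; };       /* stand-in */

    struct netdev_rx {
        struct net_device rx_napi;          /* embedded dummy netdev */
        int num_rx_q;
    };

    int main(void)
    {
        struct netdev_rx rx = { .num_rx_q = 4 };
        struct net_device *dev = &rx.rx_napi;

        /* Recover the owner from the embedded member. */
        printf("%d\n", container_of(dev, struct netdev_rx, rx_napi)->num_rx_q);
        return 0;
    }
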
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 1bcab992ac26..03b098a494b5 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -17,11 +17,11 @@
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
struct hfi1_ctxtdata *uctxt)
{
unsigned int rcvctrl_ops;
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
int ret;
uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
@@ -118,11 +118,11 @@ static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
hfi1_free_ctxt(uctxt);
}
-static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
struct hfi1_ctxtdata **ctxt)
{
int rc;
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
if (rc) {
@@ -130,7 +130,7 @@ static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
return rc;
}
- rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+ rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
if (rc) {
dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
hfi1_netdev_deallocate_ctxt(dd, *ctxt);
@@ -183,31 +183,31 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
(u32)HFI1_MAX_NETDEV_CTXTS);
}
-static int hfi1_netdev_rxq_init(struct net_device *dev)
+static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
{
int i;
int rc;
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
+ struct net_device *dev = &rx->rx_napi;
- priv->num_rx_q = dd->num_netdev_contexts;
- priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
- GFP_KERNEL, dd->node);
+ rx->num_rx_q = dd->num_netdev_contexts;
+ rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
+ GFP_KERNEL, dd->node);
- if (!priv->rxq) {
+ if (!rx->rxq) {
dd_dev_err(dd, "Unable to allocate netdev queue data\n");
return (-ENOMEM);
}
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+ rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
if (rc)
goto bail_context_irq_failure;
hfi1_rcd_get(rxq->rcd);
- rxq->priv = priv;
+ rxq->rx = rx;
rxq->rcd->napi = &rxq->napi;
dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
i, rxq->rcd->ctxt);
@@ -227,7 +227,7 @@ static int hfi1_netdev_rxq_init(struct net_device *dev)
bail_context_irq_failure:
dd_dev_err(dd, "Unable to allot receive context\n");
for (; i >= 0; i--) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
if (rxq->rcd) {
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -235,20 +235,19 @@ bail_context_irq_failure:
rxq->rcd = NULL;
}
}
- kfree(priv->rxq);
- priv->rxq = NULL;
+ kfree(rx->rxq);
+ rx->rxq = NULL;
return rc;
}
-static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
{
int i;
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
- struct hfi1_devdata *dd = priv->dd;
+ struct hfi1_devdata *dd = rx->dd;
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
netif_napi_del(&rxq->napi);
hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -256,41 +255,41 @@ static void hfi1_netdev_rxq_deinit(struct net_device *dev)
rxq->rcd = NULL;
}
- kfree(priv->rxq);
- priv->rxq = NULL;
- priv->num_rx_q = 0;
+ kfree(rx->rxq);
+ rx->rxq = NULL;
+ rx->num_rx_q = 0;
}
-static void enable_queues(struct hfi1_netdev_priv *priv)
+static void enable_queues(struct hfi1_netdev_rx *rx)
{
int i;
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+ dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
napi_enable(&rxq->napi);
- hfi1_rcvctrl(priv->dd,
+ hfi1_rcvctrl(rx->dd,
HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
rxq->rcd);
}
}
-static void disable_queues(struct hfi1_netdev_priv *priv)
+static void disable_queues(struct hfi1_netdev_rx *rx)
{
int i;
- msix_netdev_synchronize_irq(priv->dd);
+ msix_netdev_synchronize_irq(rx->dd);
- for (i = 0; i < priv->num_rx_q; i++) {
- struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+ for (i = 0; i < rx->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
- dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+ dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
rxq->rcd->ctxt);
/* wait for napi if it was scheduled */
- hfi1_rcvctrl(priv->dd,
+ hfi1_rcvctrl(rx->dd,
HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
rxq->rcd);
napi_synchronize(&rxq->napi);
@@ -307,15 +306,14 @@ static void disable_queues(struct hfi1_netdev_priv *priv)
*/
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
int res;
- if (atomic_fetch_inc(&priv->netdevs))
+ if (atomic_fetch_inc(&rx->netdevs))
return 0;
mutex_lock(&hfi1_mutex);
- init_dummy_netdev(dd->dummy_netdev);
- res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+ res = hfi1_netdev_rxq_init(rx);
mutex_unlock(&hfi1_mutex);
return res;
}
@@ -328,12 +326,12 @@ int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
*/
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
/* destroy the RX queues only if it is the last netdev going away */
- if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+ if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
mutex_lock(&hfi1_mutex);
- hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+ hfi1_netdev_rxq_deinit(rx);
mutex_unlock(&hfi1_mutex);
}
@@ -341,39 +339,43 @@ int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
}
/**
- * hfi1_netdev_alloc - Allocates netdev and private data. It is required
- * because RMT index and MSI-X interrupt can be set only
- * during driver initialization.
- *
+ * hfi1_alloc_rx - Allocates the rx support structure
* @dd: hfi1 dev data
+ *
+ * Allocate the rx structure to support gathering the receive
+ * resources and the dummy netdev.
+ *
+ * Updates dd struct pointer upon success.
+ *
+ * Return: 0 on success, -error on failure
+ *
*/
-int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+int hfi1_alloc_rx(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
- const int netdev_size = sizeof(*dd->dummy_netdev) +
- sizeof(struct hfi1_netdev_priv);
+ struct hfi1_netdev_rx *rx;
- dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
- dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+ dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
+ rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
- if (!dd->dummy_netdev)
+ if (!rx)
return -ENOMEM;
+ rx->dd = dd;
+ init_dummy_netdev(&rx->rx_napi);
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- priv->dd = dd;
- xa_init(&priv->dev_tbl);
- atomic_set(&priv->enabled, 0);
- atomic_set(&priv->netdevs, 0);
+ xa_init(&rx->dev_tbl);
+ atomic_set(&rx->enabled, 0);
+ atomic_set(&rx->netdevs, 0);
+ dd->netdev_rx = rx;
return 0;
}
-void hfi1_netdev_free(struct hfi1_devdata *dd)
+void hfi1_free_rx(struct hfi1_devdata *dd)
{
- if (dd->dummy_netdev) {
- dd_dev_info(dd, "hfi1 netdev freed\n");
- kfree(dd->dummy_netdev);
- dd->dummy_netdev = NULL;
+ if (dd->netdev_rx) {
+ dd_dev_info(dd, "hfi1 rx freed\n");
+ kfree(dd->netdev_rx);
+ dd->netdev_rx = NULL;
}
}
@@ -388,33 +390,33 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
*/
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
- if (!dd->dummy_netdev)
+ if (!dd->netdev_rx)
return;
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- if (atomic_fetch_inc(&priv->enabled))
+ rx = dd->netdev_rx;
+ if (atomic_fetch_inc(&rx->enabled))
return;
mutex_lock(&hfi1_mutex);
- enable_queues(priv);
+ enable_queues(rx);
mutex_unlock(&hfi1_mutex);
}
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
- struct hfi1_netdev_priv *priv;
+ struct hfi1_netdev_rx *rx;
- if (!dd->dummy_netdev)
+ if (!dd->netdev_rx)
return;
- priv = hfi1_netdev_priv(dd->dummy_netdev);
- if (atomic_dec_if_positive(&priv->enabled))
+ rx = dd->netdev_rx;
+ if (atomic_dec_if_positive(&rx->enabled))
return;
mutex_lock(&hfi1_mutex);
- disable_queues(priv);
+ disable_queues(rx);
mutex_unlock(&hfi1_mutex);
}
@@ -430,9 +432,9 @@ void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
*/
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+ return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
}
/**
@@ -444,9 +446,9 @@ int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
*/
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_erase(&priv->dev_tbl, id);
+ return xa_erase(&rx->dev_tbl, id);
}
/**
@@ -457,24 +459,24 @@ void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
*/
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
- return xa_load(&priv->dev_tbl, id);
+ return xa_load(&rx->dev_tbl, id);
}
/**
- * hfi1_netdev_get_first_dat - Gets first entry with greater or equal id.
+ * hfi1_netdev_get_first_data - Gets first entry with greater or equal id.
*
* @dd: hfi1 dev data
* @start_id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
- struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ struct hfi1_netdev_rx *rx = dd->netdev_rx;
unsigned long index = *start_id;
void *ret;
- ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
*start_id = (int)index;
return ret;
}
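
hfi1_netdev_enable_queues() and hfi1_netdev_disable_queues() above gate the real work on counter transitions: atomic_fetch_inc() returns the old value, so only the 0 -> 1 caller enables, while atomic_dec_if_positive() only decrements while the counter is positive and returns the decremented value, so only the 1 -> 0 caller disables. A simplified C11 sketch of the first-enabler/last-disabler pattern (plain fetch_add/fetch_sub, without the kernel's underflow guard):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int enabled;

    static void enable_queues(void)  { puts("enable"); }
    static void disable_queues(void) { puts("disable"); }

    static void get(void)
    {
        /* fetch_add returns the old value: 0 means we are first */
        if (atomic_fetch_add(&enabled, 1) == 0)
            enable_queues();
    }

    static void put(void)
    {
        /* fetch_sub returns the old value: 1 means we are last */
        if (atomic_fetch_sub(&enabled, 1) == 1)
            disable_queues();
    }

    int main(void)
    {
        get(); get();   /* enables once */
        put(); put();   /* disables once */
        return 0;
    }
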
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 46b5290b2839..1fcc6e9666e0 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1285,7 +1285,7 @@ bail:
}
/**
- * sdma_clean() Clean up allocated memory
+ * sdma_clean - Clean up allocated memory
* @dd: struct hfi1_devdata
* @num_engines: num sdma engines
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 7a851191f987..f57d55272dd2 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -907,24 +907,6 @@ static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
return 0;
}
-/**
- * sdma_iowait_schedule() - initialize wait structure
- * @sde: sdma_engine to schedule
- * @wait: wait struct to schedule
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
-static inline void sdma_iowait_schedule(
- struct sdma_engine *sde,
- struct iowait *wait)
-{
- struct hfi1_pportdata *ppd = sde->dd->pport;
-
- iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
-}
-
/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 5650130e68d4..eaf441ece25e 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -649,7 +649,7 @@ const struct attribute_group ib_hfi1_attr_group = {
.attrs = hfi1_attributes,
};
-int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj)
{
struct hfi1_pportdata *ppd;
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 769e5e4710c6..d44fc54858b9 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -53,6 +53,8 @@
#include "hfi.h"
#include "mad.h"
#include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
@@ -653,6 +655,80 @@ TRACE_EVENT(hfi1_sdma_user_completion,
__entry->code)
);
+TRACE_EVENT(hfi1_usdma_defer,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ struct sdma_engine *sde,
+ struct iowait *wait),
+ TP_ARGS(pq, sde, wait),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(struct sdma_engine *, sde)
+ __field(struct iowait *, wait)
+ __field(int, engine)
+ __field(int, empty)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->sde = sde;
+ __entry->wait = wait;
+ __entry->engine = sde->this_idx;
+ __entry->empty = list_empty(&__entry->wait->list);
+ ),
+ TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ (unsigned long long)__entry->sde,
+ (unsigned long long)__entry->wait,
+ __entry->engine,
+ __entry->empty
+ )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ struct iowait *wait,
+ int reason),
+ TP_ARGS(pq, wait, reason),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(struct iowait *, wait)
+ __field(int, reason)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->wait = wait;
+ __entry->reason = reason;
+ ),
+ TP_printk("[%s] pq %llx wait %llx reason %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ (unsigned long long)__entry->wait,
+ __entry->reason
+ )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+ TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+ int we_ret),
+ TP_ARGS(pq, we_ret),
+ TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+ __field(struct hfi1_user_sdma_pkt_q *, pq)
+ __field(int, state)
+ __field(int, we_ret)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+ __entry->pq = pq;
+ __entry->state = pq->state;
+ __entry->we_ret = we_ret;
+ ),
+ TP_printk("[%s] pq %llx state %d we_ret %d",
+ __get_str(dev),
+ (unsigned long long)__entry->pq,
+ __entry->state,
+ __entry->we_ret
+ )
+);
+
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
@@ -858,6 +934,109 @@ DEFINE_EVENT(
TP_ARGS(qp, flag)
);
+DECLARE_EVENT_CLASS(/* AIP */
+ hfi1_ipoib_txq_template,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(txq->priv->dd)
+ __field(struct hfi1_ipoib_txq *, txq)
+ __field(struct sdma_engine *, sde)
+ __field(ulong, head)
+ __field(ulong, tail)
+ __field(uint, used)
+ __field(uint, flow)
+ __field(int, stops)
+ __field(int, no_desc)
+ __field(u8, idx)
+ __field(u8, stopped)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(txq->priv->dd)
+ __entry->txq = txq;
+ __entry->sde = txq->sde;
+ __entry->head = txq->tx_ring.head;
+ __entry->tail = txq->tx_ring.tail;
+ __entry->idx = txq->q_idx;
+ __entry->used =
+ txq->sent_txreqs -
+ atomic64_read(&txq->complete_txreqs);
+ __entry->flow = txq->flow.as_int;
+ __entry->stops = atomic_read(&txq->stops);
+ __entry->no_desc = atomic_read(&txq->no_desc);
+ __entry->stopped =
+ __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+ ),
+ TP_printk(/* print */
+ "[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+ __get_str(dev),
+ (unsigned long long)__entry->txq,
+ __entry->idx,
+ (unsigned long long)__entry->sde,
+ __entry->head,
+ __entry->tail,
+ __entry->flow,
+ __entry->used,
+ __entry->stops,
+ __entry->no_desc,
+ __entry->stopped
+ )
+);
+
+DEFINE_EVENT(/* queue stop */
+ hfi1_ipoib_txq_template, hfi1_txq_stop,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+ hfi1_ipoib_txq_template, hfi1_txq_wake,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+ hfi1_ipoib_txq_template, hfi1_flow_flush,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+ hfi1_ipoib_txq_template, hfi1_flow_switch,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+ hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+ hfi1_ipoib_txq_template, hfi1_txq_full,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+ hfi1_ipoib_txq_template, hfi1_txq_queued,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+ hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+ hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
#endif /* __HFI1_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
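
The trace_tx.h additions use the standard tracepoint factoring: DECLARE_EVENT_CLASS defines the record layout, field assignment and print format once, and each DEFINE_EVENT stamps out a named tracepoint (hfi1_txq_stop, hfi1_txq_wake, ...) that reuses it, keeping nine AIP queue events at four lines apiece. A toy userspace reduction of the macro idea (the real macros also generate ring-buffer plumbing, which this skips):

    #include <stdio.h>

    #define DECLARE_CLASS(cls, fmt)                     \
        static void cls##_print(int idx, int used)      \
        {                                               \
            printf(fmt "\n", idx, used);                \
        }

    #define DEFINE_EVENT(cls, name)                     \
        static void trace_##name(int idx, int used)     \
        {                                               \
            cls##_print(idx, used);                     \
        }

    DECLARE_CLASS(txq_class, "txq idx %d used %d")
    DEFINE_EVENT(txq_class, txq_stop)
    DEFINE_EVENT(txq_class, txq_wake)

    int main(void)
    {
        trace_txq_stop(0, 3);
        trace_txq_wake(0, 0);
        return 0;
    }
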
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 4a4956f96a7e..da5b2e37355a 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -133,6 +133,7 @@ static int defer_packet_queue(
container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
write_seqlock(&sde->waitlock);
+ trace_hfi1_usdma_defer(pq, sde, &pq->busy);
if (sdma_progress(sde, seq, txreq))
goto eagain;
/*
@@ -157,7 +158,8 @@ static void activate_packet_queue(struct iowait *wait, int reason)
{
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
- pq->busy.lock = NULL;
+
+ trace_hfi1_usdma_activate(pq, wait, reason);
xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
wake_up(&wait->wait_dma);
};
@@ -599,13 +601,17 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
+ int we_ret;
+
if (ret != -EBUSY)
goto free_req;
- if (wait_event_interruptible_timeout(
+ we_ret = wait_event_interruptible_timeout(
pq->busy.wait_dma,
pq->state == SDMA_PKT_Q_ACTIVE,
msecs_to_jiffies(
- SDMA_IOWAIT_TIMEOUT)) <= 0)
+ SDMA_IOWAIT_TIMEOUT));
+ trace_hfi1_usdma_we(pq, we_ret);
+ if (we_ret <= 0)
flush_pq_iowait(pq);
}
}
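
The user_sdma.c hunk hoists wait_event_interruptible_timeout() out of the if-condition into a local so the result can be traced before it is acted on: 0 means the timeout expired, a negative value means the wait was interrupted, and a positive value means the queue went active in time; the <= 0 path still flushes the iowait state as before. The shape of the refactor, in a standalone sketch (illustrative names, not the hfi1 API):

    #include <stdio.h>

    static long wait_for_active(void)
    {
        /* stand-in: <= 0 means timeout or interruption, > 0 means woken */
        return 0;
    }

    int main(void)
    {
        long we_ret = wait_for_active();

        printf("trace: wait returned %ld\n", we_ret);  /* tracepoint analogue */
        if (we_ret <= 0)
            puts("flush iowait state");                /* flush analogue */
        return 0;
    }
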
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 1e8c02fe8ad1..fabe58139906 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -53,6 +53,7 @@
#include "common.h"
#include "iowait.h"
#include "user_exp_rcv.h"
+#include "mmu_rb.h"
/* The maximum number of Data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 0dd4bb0a5a7e..554294340caa 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1407,7 +1407,7 @@ static inline u16 opa_width_to_ib(u16 in)
}
}
-static int query_port(struct rvt_dev_info *rdi, u8 port_num,
+static int query_port(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props)
{
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
@@ -1485,7 +1485,7 @@ bail:
return ret;
}
-static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
+static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
{
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
@@ -1694,7 +1694,7 @@ static int init_cntr_names(const char *names_in,
}
static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
int i, err;
@@ -1758,7 +1758,7 @@ static u64 hfi1_sps_ints(void)
}
static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
u64 *values;
int count;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index d36e3e14896d..420df17cd184 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -325,10 +325,10 @@ static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
*/
void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
u32 qp1, u32 qp2, u32 lid1, u32 lid2);
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index d2d526c5a756..4bdfc7932376 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -99,11 +99,6 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
return tx;
}
-static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
-{
- return &tx->txreq;
-}
-
static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
{
struct sdma_txreq *stx;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index 66150a13f374..a7a450e2cf2c 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -156,7 +156,7 @@ bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
/* vnic rdma netdev operations */
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index a90824de0f57..7e79c0578ecf 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -593,7 +593,7 @@ static void hfi1_vnic_free_rn(struct net_device *netdev)
}
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 4bcaaa0524b1..5d389ed55376 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -304,6 +304,9 @@ done:
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ hns_roce_cleanup_xrcd_table(hr_dev);
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_srq_table(hr_dev);
hns_roce_cleanup_qp_table(hr_dev);
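
The hns_roce_alloc.c hunk extends teardown for the new XRC capability: each optional table is released only when its capability flag is set, newest feature first, so teardown stays the mirror image of setup. A toy illustration of capability-gated cleanup (the CAP_* macro names are stand-ins; the bit positions follow the driver's flag values):

    #include <stdio.h>

    #define CAP_SRQ (1u << 5)
    #define CAP_XRC (1u << 6)

    static void cleanup(unsigned int caps)
    {
        if (caps & CAP_XRC)
            puts("cleanup xrcd table");   /* newest feature first */
        if (caps & CAP_SRQ)
            puts("cleanup srq table");
        puts("cleanup qp table");         /* always present */
    }

    int main(void)
    {
        cleanup(CAP_XRC | CAP_SRQ);
        return 0;
    }
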
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 339e3fd98b0b..8f68cc3ff193 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -38,22 +38,14 @@
#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32
-#define CMD_TOKEN_MASK 0x1f
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier,
u8 op_modifier, u16 op, u16 token,
int event)
{
- struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- int ret;
-
- mutex_lock(&cmd->hcr_mutex);
- ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, token, event);
- mutex_unlock(&cmd->hcr_mutex);
-
- return ret;
+ return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
}
/* this should be called with "poll_sem" */
@@ -62,18 +54,19 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op,
unsigned int timeout)
{
- struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
CMD_POLL_TOKEN, 0);
if (ret) {
- dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to post mailbox %x in poll mode, ret = %d.\n",
+ op, ret);
return ret;
}
- return hr_dev->hw->chk_mbox(hr_dev, timeout);
+ return hr_dev->hw->poll_mbox_done(hr_dev, timeout);
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -96,15 +89,18 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
struct hns_roce_cmd_context *context =
&hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];
- if (token != context->token)
+ if (unlikely(token != context->token)) {
+ dev_err_ratelimited(hr_dev->dev,
+ "[cmd] invalid ae token %x,context token is %x!\n",
+ token, context->token);
return;
+ }
context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
context->out_param = out_param;
complete(&context->done);
}
-/* this should be called with "use_events" */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op,
@@ -116,44 +112,44 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
int ret;
spin_lock(&cmd->context_lock);
- WARN_ON(cmd->free_head < 0);
- context = &cmd->context[cmd->free_head];
- context->token += cmd->token_mask + 1;
- cmd->free_head = context->next;
+
+ do {
+ context = &cmd->context[cmd->free_head];
+ cmd->free_head = context->next;
+ } while (context->busy);
+
+ context->busy = 1;
+ context->token += cmd->max_cmds;
+
spin_unlock(&cmd->context_lock);
- init_completion(&context->done);
+ reinit_completion(&context->done);
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
context->token, 1);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(dev,
+ "failed to post mailbox %x in event mode, ret = %d.\n",
+ op, ret);
goto out;
+ }
- /*
- * It is timeout when wait_for_completion_timeout return 0
- * The return value is the time limit set in advance
- * how many seconds showing
- */
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
- dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
+ dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+ context->token, op);
ret = -EBUSY;
goto out;
}
ret = context->result;
- if (ret) {
- dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
- goto out;
- }
+ if (ret)
+ dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n",
+ context->token, op, ret);
out:
- spin_lock(&cmd->context_lock);
- context->next = cmd->free_head;
- cmd->free_head = context - cmd->context;
- spin_unlock(&cmd->context_lock);
-
+ context->busy = 0;
return ret;
}
@@ -175,44 +171,28 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned int timeout)
{
- int ret;
+ bool is_busy;
- if (hr_dev->hw->rst_prc_mbox) {
- ret = hr_dev->hw->rst_prc_mbox(hr_dev);
- if (ret == CMD_RST_PRC_SUCCESS)
- return 0;
- else if (ret == CMD_RST_PRC_EBUSY)
- return -EBUSY;
- }
+ if (hr_dev->hw->chk_mbox_avail)
+ if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
+ return is_busy ? -EBUSY : 0;
if (hr_dev->cmd.use_events)
- ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
+ return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
else
- ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
-
- if (ret == CMD_RST_PRC_EBUSY)
- return -EBUSY;
-
- if (ret && (hr_dev->hw->rst_prc_mbox &&
- hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
- return 0;
-
- return ret;
+ return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
}
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
- struct device *dev = hr_dev->dev;
-
- mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0;
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
- hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
+ hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev,
HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0);
if (!hr_dev->cmd.pool)
@@ -239,16 +219,16 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
for (i = 0; i < hr_cmd->max_cmds; ++i) {
hr_cmd->context[i].token = i;
hr_cmd->context[i].next = i + 1;
+ init_completion(&hr_cmd->context[i].done);
}
-
- hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
+ hr_cmd->context[hr_cmd->max_cmds - 1].next = 0;
hr_cmd->free_head = 0;
sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
spin_lock_init(&hr_cmd->context_lock);
- hr_cmd->token_mask = CMD_TOKEN_MASK;
hr_cmd->use_events = 1;
+ down(&hr_cmd->poll_sem);
return 0;
}
@@ -259,6 +239,8 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
kfree(hr_cmd->context);
hr_cmd->use_events = 0;
+
+ up(&hr_cmd->poll_sem);
}
struct hns_roce_cmd_mailbox *
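
The hns_roce_cmd.c rewrite removes the hcr_mutex and the -1-terminated free list in favor of a circular next chain plus a per-context busy flag: allocation walks the ring under context_lock until it finds a non-busy slot, release is a plain busy = 0 store, completions are initialized once in hns_roce_cmd_use_events() and reinit_completion()'d per use, and each reuse advances the token by max_cmds so a late completion carrying a stale token can be detected in hns_roce_cmd_event() and dropped. poll_sem now also arbitrates between modes: switching to event mode takes it down so pollers block until polling mode is restored. A standalone, single-threaded sketch of the ring scheme (no locking, unlike the driver):

    #include <stdio.h>

    #define MAX_CMDS 4

    struct ctx { int next; int busy; unsigned int token; };

    static struct ctx ctxs[MAX_CMDS];
    static int free_head;

    static void ring_init(void)
    {
        for (int i = 0; i < MAX_CMDS; i++) {
            ctxs[i].token = i;
            ctxs[i].next = (i + 1) % MAX_CMDS;  /* circular, no -1 end */
        }
        free_head = 0;
    }

    static struct ctx *ctx_get(void)
    {
        struct ctx *c;

        do {    /* walk the ring until a free slot turns up */
            c = &ctxs[free_head];
            free_head = c->next;
        } while (c->busy);
        c->busy = 1;
        c->token += MAX_CMDS;   /* distinguishes reuse of the same slot */
        return c;
    }

    static void ctx_put(struct ctx *c) { c->busy = 0; }

    int main(void)
    {
        ring_init();
        struct ctx *c = ctx_get();

        printf("got ctx token %u\n", c->token);
        ctx_put(c);
        return 0;
    }
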
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 23c438cef40d..d5fe56c78394 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -48,7 +48,8 @@
#define roce_set_field(origin, mask, shift, val) \
do { \
(origin) &= ~cpu_to_le32(mask); \
- (origin) |= cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
+ (origin) |= \
+ cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
} while (0)
#define roce_set_bit(origin, shift, val) \
@@ -59,9 +60,9 @@
#define _hr_reg_enable(ptr, field_type, field_h, field_l) \
({ \
const field_type *_ptr = ptr; \
- *((__le32 *)_ptr + (field_h) / 32) |= \
- cpu_to_le32(BIT((field_l) % 32)) + \
- BUILD_BUG_ON_ZERO((field_h) != (field_l)); \
+ *((__le32 *)_ptr + (field_h) / 32) |= cpu_to_le32( \
+ BIT((field_l) % 32) + \
+ BUILD_BUG_ON_ZERO((field_h) != (field_l))); \
})
#define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
@@ -69,11 +70,9 @@
#define _hr_reg_clear(ptr, field_type, field_h, field_l) \
({ \
const field_type *_ptr = ptr; \
+ BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
*((__le32 *)_ptr + (field_h) / 32) &= \
- cpu_to_le32( \
- ~GENMASK((field_h) % 32, (field_l) % 32)) + \
- BUILD_BUG_ON_ZERO(((field_h) / 32) != \
- ((field_l) / 32)); \
+ ~cpu_to_le32(GENMASK((field_h) % 32, (field_l) % 32)); \
})
#define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
@@ -87,6 +86,16 @@
#define hr_reg_write(ptr, field, val) _hr_reg_write(ptr, field, val)
+#define _hr_reg_read(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
+ FIELD_GET(GENMASK((field_h) % 32, (field_l) % 32), \
+ le32_to_cpu(*((__le32 *)_ptr + (field_h) / 32))); \
+ })
+
+#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
+
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
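
The new hr_reg_read() is the read-side companion to hr_reg_write(): GENMASK((field_h) % 32, (field_l) % 32) builds the field mask, FIELD_GET shifts the masked bits down, and BUILD_BUG_ON rejects fields that straddle a 32-bit word at compile time. A userspace re-implementation of the two primitives (32-bit masks only, no le32 conversion or compile-time check; the kernel's FIELD_GET derives the shift differently, but dividing by the mask's lowest set bit is equivalent for constant masks):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l) \
        ((~0u >> (31 - (h))) & (~0u << (l)))

    #define FIELD_GET(mask, reg) \
        (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

    int main(void)
    {
        uint32_t reg = 0x0000A500;

        /* extract bits 15..8 of the register image */
        printf("0x%x\n", FIELD_GET(GENMASK(15, 8), reg));  /* prints 0xa5 */
        return 0;
    }
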
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 74fc4940b03a..800884b074f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -225,7 +225,7 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct ib_udata *udata, unsigned long addr,
struct hns_roce_ib_create_cq_resp *resp)
{
- bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
struct hns_roce_ucontext *uctx;
int err;
@@ -250,8 +250,8 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
*hr_cq->set_ci_db = 0;
hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
}
- hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
+ hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
return 0;
@@ -276,6 +276,57 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
}
}
+static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
+ const struct ib_cq_init_attr *attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
+ ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
+ attr->cqe, hr_dev->caps.max_cqes);
+ return -EINVAL;
+ }
+
+ if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
+ ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
+ attr->comp_vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct ib_device *ibdev = hr_cq->ib_cq.device;
+ int ret;
+
+ ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
+ if (ret) {
+ ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+
+ spin_lock_init(&hr_cq->lock);
+ INIT_LIST_HEAD(&hr_cq->sq_list);
+ INIT_LIST_HEAD(&hr_cq->rq_list);
+}
+
static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
struct hns_roce_ib_create_cq *ucmd)
{
@@ -299,44 +350,23 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_ib_create_cq ucmd = {};
- int vector = attr->comp_vector;
- u32 cq_entries = attr->cqe;
int ret;
if (attr->flags)
return -EOPNOTSUPP;
- if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
- cq_entries, hr_dev->caps.max_cqes);
- return -EINVAL;
- }
-
- if (vector >= hr_dev->caps.num_comp_vectors) {
- ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
- vector, hr_dev->caps.num_comp_vectors);
- return -EINVAL;
- }
-
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
- cq_entries = roundup_pow_of_two(cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
- hr_cq->cq_depth = cq_entries;
- hr_cq->vector = vector;
- spin_lock_init(&hr_cq->lock);
- INIT_LIST_HEAD(&hr_cq->sq_list);
- INIT_LIST_HEAD(&hr_cq->rq_list);
+ ret = verify_cq_create_attr(hr_dev, attr);
+ if (ret)
+ return ret;
if (udata) {
- ret = ib_copy_from_udata(&ucmd, udata,
- min(udata->inlen, sizeof(ucmd)));
- if (ret) {
- ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
- ret);
+ ret = get_cq_ucmd(hr_cq, udata, &ucmd);
+ if (ret)
return ret;
- }
}
+ set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
+
set_cqe_size(hr_cq, udata, &ucmd);
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
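
The hns_roce_cq.c refactor splits hns_roce_create_cq() into verify_cq_create_attr(), get_cq_ucmd() and set_cq_param() without changing behavior. Worth keeping in mind is the min(udata->inlen, sizeof(*ucmd)) idiom preserved in get_cq_ucmd(): copying at most what both sides agree on lets an older userspace pass a shorter command struct while the zero-initialized tail of ucmd stays intact. A stand-in sketch of that clamped copy (types here are illustrative, not the driver ABI):

    #include <stdio.h>
    #include <string.h>

    struct create_cq_v1 { unsigned long long buf_addr; };
    struct create_cq_v2 { unsigned long long buf_addr; unsigned int cqe_size; };

    static void copy_ucmd(void *dst, size_t dst_len,
                          const void *src, size_t src_len)
    {
        size_t n = dst_len < src_len ? dst_len : src_len;

        memcpy(dst, src, n);    /* the unknown tail stays zeroed */
    }

    int main(void)
    {
        struct create_cq_v1 old_user = { .buf_addr = 0x1000 };
        struct create_cq_v2 cmd = { 0 };    /* zeroed like 'ucmd = {}' */

        copy_ucmd(&cmd, sizeof(cmd), &old_user, sizeof(old_user));
        printf("buf 0x%llx cqe_size %u\n", cmd.buf_addr, cmd.cqe_size);
        return 0;
    }
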
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 3d6b7a2db496..97800d2b9d39 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -137,6 +137,7 @@ enum {
SERV_TYPE_UC,
SERV_TYPE_RD,
SERV_TYPE_UD,
+ SERV_TYPE_XRC = 5,
};
enum hns_roce_qp_state {
@@ -168,6 +169,8 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
HNS_ROCE_EVENT_TYPE_FLR = 0x15,
+ HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
+ HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
};
#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
@@ -176,9 +179,10 @@ enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
- HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
- HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+ HNS_ROCE_CAP_FLAG_XRC = BIT(6),
HNS_ROCE_CAP_FLAG_MW = BIT(7),
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
@@ -214,12 +218,6 @@ enum {
HNS_ROCE_RST_DIRECT_RETURN = 0,
};
-enum {
- CMD_RST_PRC_OTHERS,
- CMD_RST_PRC_SUCCESS,
- CMD_RST_PRC_EBUSY,
-};
-
#define HNS_ROCE_CMD_SUCCESS 1
/* The minimum page size is 4K for hardware */
@@ -244,6 +242,11 @@ struct hns_roce_pd {
unsigned long pdn;
};
+struct hns_roce_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+};
+
struct hns_roce_bitmap {
/* Bitmap Traversal last a bit which is 1 */
unsigned long last;
@@ -363,7 +366,7 @@ struct hns_roce_wq {
int wqe_shift; /* WQE size */
u32 head;
u32 tail;
- void __iomem *db_reg_l;
+ void __iomem *db_reg;
};
struct hns_roce_sge {
@@ -437,7 +440,7 @@ struct hns_roce_cq {
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
- void __iomem *cq_db_l;
+ void __iomem *db_reg;
u16 *tptr_addr;
int arm_sn;
int cqe_size;
@@ -467,7 +470,8 @@ struct hns_roce_srq {
u32 rsv_sge;
int wqe_shift;
u32 cqn;
- void __iomem *db_reg_l;
+ u32 xrcdn;
+ void __iomem *db_reg;
atomic_t refcount;
struct completion free;
@@ -546,6 +550,7 @@ struct hns_roce_cmd_context {
int next;
u64 out_param;
u16 token;
+ u16 busy;
};
struct hns_roce_cmdq {
@@ -562,11 +567,6 @@ struct hns_roce_cmdq {
int free_head;
struct hns_roce_cmd_context *context;
/*
- * Result of get integer part
- * which max_comds compute according a power of 2
- */
- u16 token_mask;
- /*
* Process whether use event mode, init default non-zero
* After the event queue of cmd event ready,
* can switch into event mode
@@ -640,6 +640,8 @@ struct hns_roce_qp {
enum hns_roce_event event_type);
unsigned long qpn;
+ u32 xrcdn;
+
atomic_t refcount;
struct completion free;
@@ -695,7 +697,7 @@ struct hns_roce_aeqe {
struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
- void __iomem *doorbell;
+ void __iomem *db_reg;
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
@@ -723,6 +725,13 @@ struct hns_roce_eq_table {
void __iomem **eqc_base; /* only for hw v1 */
};
+enum cong_type {
+ CONG_TYPE_DCQCN,
+ CONG_TYPE_LDCP,
+ CONG_TYPE_HC3,
+ CONG_TYPE_DIP,
+};
+
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
@@ -759,13 +768,14 @@ struct hns_roce_caps {
int num_other_vectors;
u32 num_mtpts;
u32 num_mtt_segs;
- u32 num_cqe_segs;
u32 num_srqwqe_segs;
u32 num_idx_segs;
int reserved_mrws;
int reserved_uars;
int num_pds;
int reserved_pds;
+ u32 num_xrcds;
+ u32 reserved_xrcds;
u32 mtt_entry_sz;
u32 cqe_sz;
u32 page_size_cap;
@@ -794,6 +804,9 @@ struct hns_roce_caps {
u32 cqc_bt_num;
u32 cqc_timer_bt_num;
u32 mpt_bt_num;
+ u32 eqc_bt_num;
+ u32 smac_bt_num;
+ u32 sgid_bt_num;
u32 sccc_bt_num;
u32 gmv_bt_num;
u32 qpc_ba_pg_sz;
@@ -851,6 +864,7 @@ struct hns_roce_caps {
u16 default_aeq_period;
u16 default_aeq_arm_st;
u16 default_ceq_arm_st;
+ enum cong_type cong_type;
};
struct hns_roce_dfx_hw {
@@ -874,9 +888,10 @@ struct hns_roce_hw {
int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
- int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout);
- int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
- int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
+ unsigned int timeout);
+ bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
@@ -897,33 +912,17 @@ struct hns_roce_hw {
int (*clear_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx);
- int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
- int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
- int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
- int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
- const struct ib_recv_wr **bad_recv_wr);
- int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
- int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
struct ib_udata *udata);
int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
- int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
- int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata);
- int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
- int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
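[Editor's note: the old chk_mbox()/rst_prc_mbox() op pair becomes poll_mbox_done() plus a boolean chk_mbox_avail(). A minimal sketch of how a generic mailbox path could consume the new pair follows; the wrapper name and control flow are assumptions, only the op signatures come from this hunk.]

static int mbox_post_and_poll(struct hns_roce_dev *hr_dev, u64 in_param,
			      u64 out_param, u32 in_modifier, u8 op_modifier,
			      u16 op, u16 token)
{
	bool busy;
	int ret;

	/* skip (or fail) the mailbox when the device is resetting */
	if (!hr_dev->hw->chk_mbox_avail(hr_dev, &busy))
		return busy ? -EBUSY : 0;

	ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
				    op_modifier, op, token, 0);
	if (ret)
		return ret;

	return hr_dev->hw->poll_mbox_done(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS);
}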
@@ -945,6 +944,8 @@ struct hns_roce_dev {
enum hns_roce_device_state state;
struct list_head qp_list; /* list of all qps on this dev */
spinlock_t qp_list_lock; /* protect qp_list */
+ struct list_head dip_list; /* list of all dest ips on this dev */
+ spinlock_t dip_list_lock; /* protect dip_list */
struct list_head pgdir_list;
struct mutex pgdir_mutex;
@@ -963,6 +964,7 @@ struct hns_roce_dev {
struct hns_roce_cmdq cmd;
struct hns_roce_bitmap pd_bitmap;
+ struct hns_roce_bitmap xrcd_bitmap;
struct hns_roce_uar_table uar_table;
struct hns_roce_mr_table mr_table;
struct hns_roce_cq_table cq_table;
@@ -986,6 +988,9 @@ struct hns_roce_dev {
void *priv;
struct workqueue_struct *irq_workq;
const struct hns_roce_dfx_hw *dfx;
+ u32 func_num;
+ u32 is_vf;
+ u32 cong_algo_tmpl_id;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1004,6 +1009,11 @@ static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
return container_of(ibpd, struct hns_roce_pd, ibpd);
}
+static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
+}
+
static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
return container_of(ibah, struct hns_roce_ah, ibah);
@@ -1136,6 +1146,7 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
@@ -1143,6 +1154,7 @@ void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
@@ -1207,6 +1219,9 @@ int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1246,7 +1261,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
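[Editor's note: taken together, the XRC plumbing added to this header (the hns_roce_xrcd struct, the xrcd_bitmap, to_hr_xrcd() and the alloc/dealloc prototypes) suggests an allocator roughly like the sketch below. It assumes the bitmap API declared above; the committed body (flag checks, error paths) may differ.]

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
	struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
	unsigned long obj;
	int ret;

	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
		return -EOPNOTSUPP;

	/* xrcdn comes from the new per-device XRCD bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, &obj);
	if (ret)
		return ret;

	xrcd->xrcdn = (u32)obj;

	return 0;
}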
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5346fdca9473..620acf66b22c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -54,7 +54,7 @@
* GID[0][0], GID[1][0],.....GID[N - 1][0],
* And so on
*/
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
{
return gid_index * hr_dev->caps.num_ports + port;
}
@@ -345,7 +345,7 @@ out:
doorbell[0] = sq_db.u32_4;
doorbell[1] = sq_db.u32_8;
- hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+ hns_roce_write64_k(doorbell, qp->sq.db_reg);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -440,7 +440,7 @@ out:
doorbell[0] = rq_db.u32_4;
doorbell[1] = rq_db.u32_8;
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -538,7 +538,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
/*
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
@@ -711,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
int i, j;
u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port;
- u8 port = 0;
+ u32 port = 0;
u8 sl;
/* Reserved cq for loop qp */
@@ -1189,7 +1189,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
/*
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
@@ -1382,7 +1382,6 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
ret = hns_roce_v1_rsv_lp_qp(hr_dev);
if (ret) {
dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
- flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
}
@@ -1394,7 +1393,6 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv = hr_dev->priv;
struct hns_roce_free_mr *free_mr = &priv->free_mr;
- flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
hns_roce_v1_release_lp_qp(hr_dev);
@@ -1676,7 +1674,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
@@ -1939,7 +1937,7 @@ static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+ hns_roce_write64_k(doorbell, hr_cq->db_reg);
}
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2041,7 +2039,7 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
/**
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
@@ -2092,7 +2090,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
hr_cq->cqn | notification_flag);
- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+ hns_roce_write64_k(doorbell, hr_cq->db_reg);
return 0;
}
@@ -2673,8 +2671,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int ret = -EINVAL;
u64 sq_ba = 0;
u64 rq_ba = 0;
- int port;
- u8 port_num;
+ u32 port;
+ u32 port_num;
u8 *dmac;
u8 *smac;
@@ -3217,12 +3215,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
if (ibqp->uobject) {
- hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ hr_qp->rq.db_reg = hr_dev->reg_base +
hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
}
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
}
hr_qp->state = new_state;
@@ -3449,8 +3447,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
((roce_get_bit(context->qpc_bytes_4,
QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
- if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
- hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
struct ib_global_route *grh =
rdma_ah_retrieve_grh(&qp_attr->ah_attr);
@@ -3604,7 +3601,7 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
{
roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
- (req_not << eq->log_entries), eq->doorbell);
+ (req_not << eq->log_entries), eq->db_reg);
}
static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
@@ -4170,7 +4167,7 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
* Configure eq extended address 45~49 bit.
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
* using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
+ * calculating the high 32 bit value evaluated to hardware.
*/
roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
@@ -4234,9 +4231,9 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
ROCEE_CAEP_CEQC_SHIFT_0_REG +
CEQ_REG_OFFSET * i;
eq->type_flag = HNS_ROCE_CEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- CEQ_REG_OFFSET * i;
+ eq->db_reg = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ CEQ_REG_OFFSET * i;
eq->entries = hr_dev->caps.ceqe_depth;
eq->log_entries = ilog2(eq->entries);
eq->eqe_size = HNS_ROCE_CEQE_SIZE;
@@ -4245,8 +4242,8 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
eq_table->eqc_base[i] = hr_dev->reg_base +
ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
eq->type_flag = HNS_ROCE_AEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->db_reg = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
eq->entries = hr_dev->caps.aeqe_depth;
eq->log_entries = ilog2(eq->entries);
eq->eqe_size = HNS_ROCE_AEQE_SIZE;
@@ -4349,7 +4346,7 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.hw_init = hns_roce_v1_init,
.hw_exit = hns_roce_v1_exit,
.post_mbox = hns_roce_v1_post_mbox,
- .chk_mbox = hns_roce_v1_chk_mbox,
+ .poll_mbox_done = hns_roce_v1_chk_mbox,
.set_gid = hns_roce_v1_set_gid,
.set_mac = hns_roce_v1_set_mac,
.set_mtu = hns_roce_v1_set_mtu,
@@ -4357,12 +4354,6 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.write_cqc = hns_roce_v1_write_cqc,
.clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
- .query_qp = hns_roce_v1_query_qp,
- .destroy_qp = hns_roce_v1_destroy_qp,
- .post_send = hns_roce_v1_post_send,
- .post_recv = hns_roce_v1_post_recv,
- .req_notify_cq = hns_roce_v1_req_notify_cq,
- .poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
.init_eq = hns_roce_v1_init_eq_table,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ce26f97b2ca2..7652dafe32ec 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -48,6 +48,12 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+enum {
+ CMD_RST_PRC_OTHERS,
+ CMD_RST_PRC_SUCCESS,
+ CMD_RST_PRC_EBUSY,
+};
+
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
@@ -632,24 +638,60 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
* around the mailbox calls. Hence, use the deferred flush for
* now.
*/
- if (qp->state == IB_QPS_ERR) {
+ if (unlikely(qp->state == IB_QPS_ERR)) {
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
init_flush_work(hr_dev, qp);
} else {
struct hns_roce_v2_db sq_db = {};
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
- V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
- V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ roce_set_field(sq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ qp->doorbell_qpn);
+ roce_set_field(sq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_SQ_DB);
+
/* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S, qp->sq.head);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
- V2_DB_PARAMETER_SL_S, qp->sl);
+ roce_set_field(sq_db.parameter, V2_DB_PRODUCER_IDX_M,
+ V2_DB_PRODUCER_IDX_S, qp->sq.head);
+ roce_set_field(sq_db.parameter, V2_DB_SL_M, V2_DB_SL_S,
+ qp->sl);
+
+ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
+ }
+}
+
+static inline void update_rq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+{
+ /*
+ * Hip08 hardware cannot flush the WQEs in RQ if the QP
+ * enters the error state. Hence, as a workaround for this
+ * hardware limitation, the driver needs to assist in flushing. But
+ * the flushing operation uses a mailbox to convey the QP state to
+ * the hardware, which can sleep due to the mutex protection
+ * around the mailbox calls. Hence, use the deferred flush for
+ * now.
+ */
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ } else {
+ if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
+ *qp->rdb.db_record =
+ qp->rq.head & V2_DB_PRODUCER_IDX_M;
+ } else {
+ struct hns_roce_v2_db rq_db = {};
- hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+ roce_set_field(rq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ qp->qpn);
+ roce_set_field(rq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_RQ_DB);
+ roce_set_field(rq_db.parameter, V2_DB_PRODUCER_IDX_M,
+ V2_DB_PRODUCER_IDX_S, qp->rq.head);
+
+ hns_roce_write64(hr_dev, (__le32 *)&rq_db,
+ qp->rq.db_reg);
+ }
}
}
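[Editor's note: two details in the new helper are worth calling out. When HNS_ROCE_QP_CAP_RQ_RECORD_DB is set, the RQ producer index is only stored in the in-memory doorbell record, masked with V2_DB_PRODUCER_IDX_M (the 16-bit producer-index field, matching the old open-coded "& 0xffff"), so no MMIO write is needed; only QPs without a record doorbell fall back to ringing the hardware register. The post_recv hunk below accordingly collapses its duplicated copy of this logic into a single update_rq_db() call.]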
@@ -681,8 +723,7 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
- hns_roce_write512(hr_dev, wqe, hr_dev->mem_base +
- HNS_ROCE_DWQE_SIZE * qp->ibqp.qp_num);
+ hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
@@ -879,22 +920,7 @@ out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
- /*
- * Hip08 hardware cannot flush the WQEs in RQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
- if (hr_qp->state == IB_QPS_ERR) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
- &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
- } else {
- *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
- }
+ update_rq_db(hr_dev, hr_qp);
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -1016,13 +1042,14 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
if (likely(nreq)) {
- srq_db.byte_4 =
- cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
- (srq->srqn & V2_DB_BYTE_4_TAG_M));
- srq_db.parameter =
- cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
+ roce_set_field(srq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ srq->srqn);
+ roce_set_field(srq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_SRQ_DB);
+ roce_set_field(srq_db.parameter, V2_DB_PRODUCER_IDX_M,
+ V2_DB_PRODUCER_IDX_S, srq->idx_que.head);
- hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
}
spin_unlock_irqrestore(&srq->lock, flags);
@@ -1030,7 +1057,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
return ret;
}
-static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
+static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
@@ -1053,7 +1080,7 @@ static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
return CMD_RST_PRC_SUCCESS;
}
-static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
@@ -1081,7 +1108,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
return CMD_RST_PRC_SUCCESS;
}
-static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
+static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
@@ -1098,10 +1125,9 @@ static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
return CMD_RST_PRC_EBUSY;
}
-static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
+static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
unsigned long reset_stage; /* the current reset stage */
@@ -1109,9 +1135,6 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
bool sw_resetting;
bool hw_resetting;
- if (hr_dev->is_reset)
- return CMD_RST_PRC_SUCCESS;
-
/* Get information about reset from NIC driver or RoCE driver itself,
* the meaning of the following variables from NIC driver are described
* as below:
@@ -1122,19 +1145,53 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_cmdq_stat(handle);
- sw_resetting = ops->ae_dev_resetting(handle);
-
if (reset_cnt != hr_dev->reset_cnt)
return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
reset_stage);
- else if (hw_resetting)
+
+ hw_resetting = ops->get_cmdq_stat(handle);
+ if (hw_resetting)
return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
reset_stage);
- else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
+
+ sw_resetting = ops->ae_dev_resetting(handle);
+ if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
return hns_roce_v2_cmd_sw_resetting(hr_dev);
- return 0;
+ return CMD_RST_PRC_OTHERS;
+}
+
+static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
+ return true;
+
+ if (ops->get_hw_reset_stat(handle))
+ return true;
+
+ if (ops->ae_dev_resetting(handle))
+ return true;
+
+ return false;
+}
+
+static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ u32 status;
+
+ if (hr_dev->is_reset)
+ status = CMD_RST_PRC_SUCCESS;
+ else
+ status = check_aedev_reset_status(hr_dev, priv->handle);
+
+ *busy = (status == CMD_RST_PRC_EBUSY);
+
+ return status == CMD_RST_PRC_OTHERS;
}
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
@@ -1152,6 +1209,9 @@ static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
ring->desc_dma_addr = 0;
kfree(ring->desc);
ring->desc = NULL;
+
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to map cmq desc addr.\n");
return -ENOMEM;
}
@@ -1228,14 +1288,16 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
/* Init CSQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
if (ret) {
- dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to init CSQ, ret = %d.\n", ret);
return ret;
}
/* Init CRQ */
ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
if (ret) {
- dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to init CRQ, ret = %d.\n", ret);
goto err_crq;
}
@@ -1352,27 +1414,36 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
- int retval;
+ bool busy;
int ret;
- ret = hns_roce_v2_rst_process_cmd(hr_dev);
- if (ret == CMD_RST_PRC_SUCCESS)
- return 0;
- if (ret == CMD_RST_PRC_EBUSY)
- return -EBUSY;
+ if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+ return busy ? -EBUSY : 0;
ret = __hns_roce_cmq_send(hr_dev, desc, num);
if (ret) {
- retval = hns_roce_v2_rst_process_cmd(hr_dev);
- if (retval == CMD_RST_PRC_SUCCESS)
- return 0;
- else if (retval == CMD_RST_PRC_EBUSY)
- return -EBUSY;
+ if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+ return busy ? -EBUSY : 0;
}
return ret;
}
+static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+ dma_addr_t base_addr, u16 op)
+{
+ struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ int ret;
+
+ if (IS_ERR(mbox))
+ return PTR_ERR(mbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mbox);
+ return ret;
+}
+
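[Editor's note: config_hem_ba_to_hw() packages the alloc-mailbox/issue/free pattern used when writing a HEM base address to hardware. Its call site is outside this excerpt; presumably the v2 set_hem path invokes it along the lines of config_hem_ba_to_hw(hr_dev, obj, bt_ba, op + step_idx), but treat that op arithmetic as an assumption.]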
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_version *resp;
@@ -1391,92 +1462,90 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
return 0;
}
-static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
+static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- unsigned long reset_cnt;
- bool sw_resetting;
- bool hw_resetting;
+ unsigned long end;
- reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
- sw_resetting = ops->ae_dev_resetting(handle);
+ hr_dev->dis_db = true;
- if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
- return true;
+ dev_warn(hr_dev->dev,
+ "Func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (!ops->get_hw_reset_stat(handle)) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "Func clear success after reset.\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
- return false;
+ dev_warn(hr_dev->dev, "Func clear failed.\n");
}
-static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
- int flag)
+static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- unsigned long instance_stage;
- unsigned long reset_cnt;
unsigned long end;
- bool sw_resetting;
- bool hw_resetting;
- instance_stage = handle->rinfo.instance_state;
- reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
- sw_resetting = ops->ae_dev_resetting(handle);
+ hr_dev->dis_db = true;
+
+ dev_warn(hr_dev->dev,
+ "Func clear is pending, device in resetting state.\n");
+ end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+ while (end) {
+ if (ops->ae_dev_reset_cnt(handle) !=
+ hr_dev->reset_cnt) {
+ hr_dev->is_reset = true;
+ dev_info(hr_dev->dev,
+ "Func clear success after sw reset\n");
+ return;
+ }
+ msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+ end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+ }
+
+ dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+}
+
+static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
+ int flag)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- if (reset_cnt != hr_dev->reset_cnt) {
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
hr_dev->dis_db = true;
hr_dev->is_reset = true;
dev_info(hr_dev->dev, "Func clear success after reset.\n");
- } else if (hw_resetting) {
- hr_dev->dis_db = true;
+ return;
+ }
- dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
- end = HNS_ROCE_V2_HW_RST_TIMEOUT;
- while (end) {
- if (!ops->get_hw_reset_stat(handle)) {
- hr_dev->is_reset = true;
- dev_info(hr_dev->dev,
- "Func clear success after reset.\n");
- return;
- }
- msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
- end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
- }
+ if (ops->get_hw_reset_stat(handle)) {
+ func_clr_hw_resetting_state(hr_dev, handle);
+ return;
+ }
- dev_warn(hr_dev->dev, "Func clear failed.\n");
- } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
- hr_dev->dis_db = true;
+ if (ops->ae_dev_resetting(handle) &&
+ handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
+ func_clr_sw_resetting_state(hr_dev, handle);
+ return;
+ }
+ if (retval && !flag)
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
- end = HNS_ROCE_V2_HW_RST_TIMEOUT;
- while (end) {
- if (ops->ae_dev_reset_cnt(handle) !=
- hr_dev->reset_cnt) {
- hr_dev->is_reset = true;
- dev_info(hr_dev->dev,
- "Func clear success after sw reset\n");
- return;
- }
- msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
- end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
- }
-
- dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
- } else {
- if (retval && !flag)
- dev_warn(hr_dev->dev,
- "Func clear read failed, ret = %d.\n", retval);
+ "Func clear read failed, ret = %d.\n", retval);
- dev_warn(hr_dev->dev, "Func clear failed.\n");
- }
+ dev_warn(hr_dev->dev, "Func clear failed.\n");
}
-static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+
+static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
bool fclr_write_fail_flag = false;
struct hns_roce_func_clear *resp;
@@ -1484,11 +1553,12 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
unsigned long end;
int ret = 0;
- if (hns_roce_func_clr_chk_rst(hr_dev))
+ if (check_device_is_in_reset(hr_dev))
goto out;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
resp = (struct hns_roce_func_clear *)desc.data;
+ resp->rst_funcid_en = cpu_to_le32(vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
@@ -1501,7 +1571,7 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
while (end) {
- if (hns_roce_func_clr_chk_rst(hr_dev))
+ if (check_device_is_in_reset(hr_dev))
goto out;
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
@@ -1509,18 +1579,45 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
true);
+ resp->rst_funcid_en = cpu_to_le32(vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
continue;
if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
- hr_dev->is_reset = true;
+ if (vf_id == 0)
+ hr_dev->is_reset = true;
return;
}
}
out:
- hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
+ hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
+}
+
+static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *req_a;
+
+ req_a = (struct hns_roce_cmq_req *)desc[0].data;
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
+ hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
+ hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = hr_dev->func_num - 1; i >= 0; i--) {
+ __hns_roce_function_clear(hr_dev, i);
+ if (i != 0)
+ hns_roce_free_vf_resource(hr_dev, i);
+ }
}
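[Editor's note, worked example of the teardown order: with func_num = 3, the loop runs __hns_roce_function_clear(hr_dev, 2) then hns_roce_free_vf_resource(hr_dev, 2), the same pair for vf_id 1, and finally __hns_roce_function_clear(hr_dev, 0) for the PF itself — the only iteration allowed to set hr_dev->is_reset. hns_roce_free_vf_resource() reuses HNS_ROCE_OPC_ALLOC_VF_RES with only the VF ID filled in, i.e. zero-sized windows, which presumably revokes the earlier allocation.]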
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
@@ -1540,79 +1637,107 @@ static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
return 0;
}
+static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+ hr_dev->func_num = 1;
+ return 0;
+ }
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
+ true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ hr_dev->func_num = 1;
+ return ret;
+ }
+
+ hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
+ hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
+
+ return 0;
+}
+
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_cfg_global_param *req;
struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
- req = (struct hns_roce_cfg_global_param *)desc.data;
- memset(req, 0, sizeof(*req));
- roce_set_field(req->time_cfg_udp_port,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
- roce_set_field(req->time_cfg_udp_port,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S,
- ROCE_V2_UDP_DPORT);
+ hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+ hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
struct hns_roce_cmq_desc desc[2];
- struct hns_roce_pf_res_a *req_a;
- struct hns_roce_pf_res_b *req_b;
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ enum hns_roce_opcode_type opcode;
+ u32 func_num;
int ret;
- hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_QUERY_PF_RES,
- true);
- desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ if (is_vf) {
+ opcode = HNS_ROCE_OPC_QUERY_VF_RES;
+ func_num = 1;
+ } else {
+ opcode = HNS_ROCE_OPC_QUERY_PF_RES;
+ func_num = hr_dev->func_num;
+ }
- hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_QUERY_PF_RES,
- true);
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
return ret;
- req_a = (struct hns_roce_pf_res_a *)desc[0].data;
- req_b = (struct hns_roce_pf_res_b *)desc[1].data;
-
- hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_BT_NUM_S);
- hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
- PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
- PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
- hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
- PF_RES_DATA_3_PF_CQC_BT_NUM_M,
- PF_RES_DATA_3_PF_CQC_BT_NUM_S);
- hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
- PF_RES_DATA_4_PF_MPT_BT_NUM_M,
- PF_RES_DATA_4_PF_MPT_BT_NUM_S);
-
- hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
- PF_RES_DATA_3_PF_SL_NUM_M,
- PF_RES_DATA_3_PF_SL_NUM_S);
- hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
- PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
- PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
-
- hr_dev->caps.gmv_bt_num = roce_get_field(req_b->gmv_idx_num,
- PF_RES_DATA_5_PF_GMV_BT_NUM_M,
- PF_RES_DATA_5_PF_GMV_BT_NUM_S);
+ caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
+ caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
+ caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
+ caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
+ caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
+ caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
+ caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
+ caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
+
+ if (is_vf) {
+ caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
+ caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
+ func_num;
+ } else {
+ caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
+ caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
+ func_num;
+ }
return 0;
}
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+ return load_func_res_caps(hr_dev, false);
+}
+
+static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ return load_func_res_caps(hr_dev, true);
+}
+
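[Editor's note on the division by func_num: the firmware reports pool totals for the whole physical function, and load_func_res_caps() now splits them evenly across the PF and its VFs. For example, if the PF query returned 512 QPC BT entries and func_num is 4, each function ends up with caps->qpc_bt_num = 128, and __hns_roce_alloc_vf_resource() further down places VF n's window at index n * 128 via the FUNC_RES_A_QPC_BT_IDX field.]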
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_pf_timer_res_a *req_a;
struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
@@ -1622,24 +1747,17 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
- req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
-
- hr_dev->caps.qpc_timer_bt_num =
- roce_get_field(req_a->qpc_timer_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
- hr_dev->caps.cqc_timer_bt_num =
- roce_get_field(req_a->cqc_timer_bt_idx_num,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
+ caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
+ caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
return 0;
}
-static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
+static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ u32 vf_id)
{
- struct hns_roce_cmq_desc desc;
struct hns_roce_vf_switch *swt;
+ struct hns_roce_cmq_desc desc;
int ret;
swt = (struct hns_roce_vf_switch *)desc.data;
@@ -1661,153 +1779,127 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_cmq_desc desc[2];
- struct hns_roce_vf_res_a *req_a;
- struct hns_roce_vf_res_b *req_b;
+ u32 vf_id;
+ int ret;
+
+ for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+ ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
- req_a = (struct hns_roce_vf_res_a *)desc[0].data;
- req_b = (struct hns_roce_vf_res_b *)desc[1].data;
+static int __hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+ struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+ enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+ struct hns_roce_caps *caps = &hr_dev->caps;
- hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_ALLOC_VF_RES,
- false);
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
- hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_ALLOC_VF_RES,
- false);
+ hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
+
+ hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
+ hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
+ hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
+ hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
+ hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);
-
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
- HNS_ROCE_VF_SRQC_BT_NUM);
-
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);
-
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);
-
- roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
- VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
- roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
- VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);
-
- roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
- VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
- roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
- VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);
-
- roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
- VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
- roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
- VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);
-
- roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
- VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
- roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
- VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);
-
- roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
- roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
- HNS_ROCE_VF_SCCC_BT_NUM);
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
+ vf_id * caps->gmv_bt_num);
+ } else {
+ hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
+ vf_id * caps->sgid_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
+ hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
+ vf_id * caps->smac_bt_num);
+ }
return hns_roce_cmq_send(hr_dev, desc, 2);
}
+static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ int vf_id;
+ int ret;
+
+ for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+ ret = __hns_roce_alloc_vf_resource(hr_dev, vf_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
- u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
- u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
- u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
- u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
- u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
- struct hns_roce_cfg_bt_attr *req;
struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ struct hns_roce_caps *caps = &hr_dev->caps;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
- req = (struct hns_roce_cfg_bt_attr *)desc.data;
- memset(req, 0, sizeof(*req));
-
- roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
- hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
- hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
- CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
- qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
-
- roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
- hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
- hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
- CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
- srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
-
- roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
- hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
- hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
- CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
- cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
-
- roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
- hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
- hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
- CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
- mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
-
- roce_set_field(req->vf_sccc_cfg,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
- hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_sccc_cfg,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
- CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
- hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(req->vf_sccc_cfg,
- CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
- CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
- sccc_hop_num ==
- HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
+
+ hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
+ caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
+ caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
+ to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
+
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
+ caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
+ caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
+ to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
+
+ hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
+ caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
+ caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
+ to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
+
+ hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
+ caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
+ caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
+ to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
+
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
+ caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
+ caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
+ hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
+ to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_caps *caps = &hr_dev->caps;
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
@@ -1819,24 +1911,24 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
- caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
+ caps->num_comp_vectors =
+ min_t(u32, caps->eqc_bt_num - 1,
+ (u32)priv->handle->rinfo.num_vectors - 2);
caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
+ caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
- caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -1844,56 +1936,39 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
- caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
+ caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
caps->reserved_srqs = 0;
caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
- caps->qpc_ba_pg_sz = 0;
- caps->qpc_buf_pg_sz = 0;
caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->srqc_ba_pg_sz = 0;
- caps->srqc_buf_pg_sz = 0;
caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->cqc_ba_pg_sz = 0;
- caps->cqc_buf_pg_sz = 0;
caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->mpt_ba_pg_sz = 0;
- caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->mtt_ba_pg_sz = 0;
- caps->mtt_buf_pg_sz = 0;
caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
- caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
- caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
- caps->srqwqe_ba_pg_sz = 0;
- caps->srqwqe_buf_pg_sz = 0;
caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
- caps->idx_ba_pg_sz = 0;
- caps->idx_buf_pg_sz = 0;
caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
- caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
- HNS_ROCE_CAP_FLAG_RECORD_DB |
- HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
caps->pkey_table_len[0] = 1;
- caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
- caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
- caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1902,22 +1977,15 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
- HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
- caps->qpc_timer_ba_pg_sz = 0;
- caps->qpc_timer_buf_pg_sz = 0;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
- caps->cqc_timer_ba_pg_sz = 0;
- caps->cqc_timer_buf_pg_sz = 0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
- caps->sccc_ba_pg_sz = 0;
- caps->sccc_buf_pg_sz = 0;
caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
@@ -1930,10 +1998,17 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
caps->gmv_entry_sz);
caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gmv_ba_pg_sz = 0;
- caps->gmv_buf_pg_sz = 0;
caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
caps->gmv_entry_sz);
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INL_EXT;
+ } else {
+ caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+ caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+ caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
+ caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
+ caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
}
}
@@ -1979,6 +2054,70 @@ static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}
+static void set_hem_page_size(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ /* EQ */
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+
+ /* Link Table */
+ caps->tsq_buf_pg_sz = 0;
+
+ /* MR */
+ caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
+ caps->pbl_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
+ caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
+ HEM_TYPE_MTPT);
+
+ /* QP */
+ caps->qpc_timer_ba_pg_sz = 0;
+ caps->qpc_timer_buf_pg_sz = 0;
+ caps->mtt_ba_pg_sz = 0;
+ caps->mtt_buf_pg_sz = 0;
+ calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
+ caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
+ HEM_TYPE_QPC);
+
+ if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+ calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
+ caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
+ &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
+
+ /* CQ */
+ calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
+ caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
+ HEM_TYPE_CQC);
+ calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
+ 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
+
+ if (caps->cqc_timer_entry_sz)
+ calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
+ caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
+ &caps->cqc_timer_buf_pg_sz,
+ &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
+
+ /* SRQ */
+ if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
+ caps->srqc_hop_num, caps->srqc_bt_num,
+ &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
+ HEM_TYPE_SRQC);
+ calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
+ caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
+ &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
+ calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
+ caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
+ &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
+ }
+
+ /* GMV */
+ caps->gmv_ba_pg_sz = 0;
+ caps->gmv_buf_pg_sz = 0;
+}
+
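[Editor's note: set_hem_page_size() consolidates the ba/buf page-size defaults and the calc_pg_sz() calls that were previously scattered between set_default_caps() and hns_roce_query_pf_caps() (see the block removed from the latter further down), so both the default and the firmware-queried capability paths derive HEM page sizes in one place, after the final object counts are known.]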
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
@@ -2062,6 +2201,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
V2_QUERY_PF_CAPS_C_MAX_GID_M,
V2_QUERY_PF_CAPS_C_MAX_GID_S);
+
+ caps->gid_table_len[0] /= hr_dev->func_num;
+
caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
@@ -2079,13 +2221,18 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
+ caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
+ V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
+ V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
+
caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
+
caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
@@ -2133,8 +2280,8 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
- caps->mtt_ba_pg_sz = 0;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+ caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
+ caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
@@ -2166,99 +2313,82 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
caps->gmv_entry_sz);
caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gmv_ba_pg_sz = 0;
- caps->gmv_buf_pg_sz = 0;
caps->gid_table_len[0] = caps->gmv_bt_num *
(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
}
- calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
- caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
- HEM_TYPE_QPC);
- calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
- caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
- HEM_TYPE_MTPT);
- calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
- caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
- HEM_TYPE_CQC);
- calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
- caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
- &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
-
- caps->sccc_hop_num = ctx_hop_num;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- calc_pg_sz(caps->num_qps, caps->sccc_sz,
- caps->sccc_hop_num, caps->sccc_bt_num,
- &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
- HEM_TYPE_SCCC);
- calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
- caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
- &caps->cqc_timer_buf_pg_sz,
- &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
-
- calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
- 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
- calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
- caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
- &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
- calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
- 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
-
return 0;
}
-static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
+static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
{
struct hns_roce_cmq_desc desc;
- struct hns_roce_cfg_entry_size *cfg_size =
- (struct hns_roce_cfg_entry_size *)desc.data;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
false);
- cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
- cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
-
- return hns_roce_cmq_send(hr_dev, &desc, 1);
-}
-
-static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_cfg_entry_size *cfg_size =
- (struct hns_roce_cfg_entry_size *)desc.data;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
- false);
-
- cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
- cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
+ hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
+ hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
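/*
 * hr_reg_write() is a field-token helper that replaces the open-coded
 * roce_set_field(..., _M, _S, val) pattern: each CFG_HEM_ENTRY_SIZE_*
 * token encodes the word offset and bit mask of one field in the request.
 * A minimal sketch of configuring the QPC entry size this way, assuming
 * the token layout above:
 *
 *	hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, HNS_ROCE_CFG_QPC_SIZE);
 *	hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, hr_dev->caps.qpc_sz);
 */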
static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
return 0;
- ret = hns_roce_config_qpc_size(hr_dev);
+ ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
+ caps->qpc_sz);
if (ret) {
dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
return ret;
}
- ret = hns_roce_config_sccc_size(hr_dev);
+ ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
+ caps->sccc_sz);
if (ret)
dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
return ret;
}
+static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
+ hr_dev->func_num = 1;
+
+ ret = hns_roce_query_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Query the VF resource fail, ret = %d.\n", ret);
+ return ret;
+ }
+
+ set_default_caps(hr_dev);
+ set_hem_page_size(hr_dev);
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Configure the VF bt attribute fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
struct hns_roce_caps *caps = &hr_dev->caps;
@@ -2278,6 +2408,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+ if (hr_dev->is_vf)
+ return hns_roce_v2_vf_profile(hr_dev);
+
+ ret = hns_roce_query_func_info(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query function info fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
ret = hns_roce_config_global_param(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
@@ -2300,7 +2440,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- ret = hns_roce_set_vf_switch_param(hr_dev, 0);
+ ret = hns_roce_set_vf_switch_param(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
"failed to set function switch param, ret = %d.\n",
@@ -2311,13 +2451,8 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_part_id = hr_dev->pci_dev->device;
hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
- caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
- caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
- caps->eqe_ba_pg_sz = 0;
- caps->eqe_buf_pg_sz = 0;
caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
- caps->tsq_buf_pg_sz = 0;
ret = hns_roce_query_pf_caps(hr_dev);
if (ret)
@@ -2330,6 +2465,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+ set_hem_page_size(hr_dev);
ret = hns_roce_v2_set_bt(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
@@ -2507,6 +2643,22 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
link_tbl->table.map);
}
+static void free_dip_list(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_dip *hr_dip;
+ struct hns_roce_dip *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+
+ list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
+ list_del(&hr_dip->node);
+ kfree(hr_dip);
+ }
+
+ spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+}
+
static int get_hem_table(struct hns_roce_dev *hr_dev)
{
unsigned int qpc_count;
@@ -2515,6 +2667,17 @@ static int get_hem_table(struct hns_roce_dev *hr_dev)
int ret;
int i;
+ /* Alloc memory for source address table buffer space chunk */
+ for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
+ gmv_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
+ if (ret)
+ goto err_gmv_failed;
+ }
+
+ if (hr_dev->is_vf)
+ return 0;
+
/* Alloc memory for QPC Timer buffer space chunk */
for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
qpc_count++) {
@@ -2537,23 +2700,8 @@ static int get_hem_table(struct hns_roce_dev *hr_dev)
}
}
- /* Alloc memory for GMV(GID/MAC/VLAN) table buffer space chunk */
- for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
- gmv_count++) {
- ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
- if (ret) {
- dev_err(hr_dev->dev,
- "failed to get gmv table, ret = %d.\n", ret);
- goto err_gmv_failed;
- }
- }
-
return 0;
-err_gmv_failed:
- for (i = 0; i < gmv_count; i++)
- hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
-
err_cqc_timer_failed:
for (i = 0; i < cqc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
@@ -2562,19 +2710,47 @@ err_qpc_timer_failed:
for (i = 0; i < qpc_count; i++)
hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+err_gmv_failed:
+ for (i = 0; i < gmv_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
return ret;
}
+static void put_hem_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+
+ for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+
+ if (hr_dev->is_vf)
+ return;
+
+ for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+
+ for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
+}
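+/*
+ * put_hem_table() mirrors get_hem_table(): the GMV table is acquired
+ * first for both PF and VF, so it is released first here, and the
+ * PF-only QPC/CQC timer tables follow. Keeping the two paths symmetric
+ * is also why err_gmv_failed sits last in the unwind ladder above.
+ */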
+
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
+ ret = get_hem_table(hr_dev);
+ if (ret)
+ return ret;
+
+ if (hr_dev->is_vf)
+ return 0;
+
/* TSQ includes SQ doorbell and ack doorbell */
ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
if (ret) {
dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
- return ret;
+ goto err_tsq_init_failed;
}
ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
@@ -2583,17 +2759,13 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
goto err_tpq_init_failed;
}
- ret = get_hem_table(hr_dev);
- if (ret)
- goto err_get_hem_table_failed;
-
return 0;
-err_get_hem_table_failed:
- hns_roce_free_link_table(hr_dev, &priv->tpq);
+err_tsq_init_failed:
+ put_hem_table(hr_dev);
err_tpq_init_failed:
- hns_roce_free_link_table(hr_dev, &priv->tsq);
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
return ret;
}
@@ -2604,38 +2776,13 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
hns_roce_function_clear(hr_dev);
- hns_roce_free_link_table(hr_dev, &priv->tpq);
- hns_roce_free_link_table(hr_dev, &priv->tsq);
-}
-
-static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_mbox_status *mb_st =
- (struct hns_roce_mbox_status *)desc.data;
- int status;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
-
- status = hns_roce_cmq_send(hr_dev, &desc, 1);
- if (status)
- return status;
-
- return le32_to_cpu(mb_st->mb_status_hw_run);
-}
-
-static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = hns_roce_query_mbox_status(hr_dev);
-
- return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
-}
-
-static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
-{
- u32 status = hns_roce_query_mbox_status(hr_dev);
+ if (!hr_dev->is_vf) {
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+ }
- return status & HNS_ROCE_HW_MB_STATUS_MASK;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+ free_dip_list(hr_dev);
}
static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -2657,58 +2804,97 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
+static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
+ u8 *complete_status)
{
- struct device *dev = hr_dev->dev;
+ struct hns_roce_mbox_status *mb_st;
+ struct hns_roce_cmq_desc desc;
unsigned long end;
- int ret;
+ int ret = -EBUSY;
+ u32 status;
+ bool busy;
+
+ mb_st = (struct hns_roce_mbox_status *)desc.data;
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ status = 0;
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
+ true);
+ ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (!ret) {
+ status = le32_to_cpu(mb_st->mb_status_hw_run);
+ /* No pending message exists in ROCEE mbox. */
+ if (!(status & MB_ST_HW_RUN_M))
+ break;
+ } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ break;
+ }
- end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (hns_roce_v2_cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
- dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
- (int)end);
- return -EAGAIN;
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to wait mbox status 0x%x\n",
+ status);
+ return -ETIMEDOUT;
}
+
cond_resched();
+ ret = -EBUSY;
}
- ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
- op_modifier, op, token, event);
- if (ret)
- dev_err(dev, "Post mailbox fail(%d)\n", ret);
+ if (!ret) {
+ *complete_status = (u8)(status & MB_ST_COMPLETE_M);
+ } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+ /* Ignore all errors if the mbox is unavailable. */
+ ret = 0;
+ *complete_status = MB_ST_COMPLETE_M;
+ }
return ret;
}
-static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned int timeout)
+static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op, u16 token, int event)
{
- struct device *dev = hr_dev->dev;
- unsigned long end;
- u32 status;
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
+ u8 status = 0;
+ int ret;
- if (hns_roce_v2_cmd_pending(hr_dev)) {
- dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
- return -ETIMEDOUT;
+ /* Waiting for the mbox to be idle */
+ ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
+ &status);
+ if (unlikely(ret)) {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to check post mbox status = 0x%x, ret = %d.\n",
+ status, ret);
+ return ret;
}
- status = hns_roce_v2_cmd_complete(hr_dev);
- if (status != 0x1) {
- if (status == CMD_RST_PRC_EBUSY)
- return status;
+ /* Post new message to mbox */
+ ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to post mailbox, ret = %d.\n", ret);
+
+ return ret;
+}
- dev_err(dev, "mailbox status 0x%x!\n", status);
- return -EBUSY;
+static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
+ if (!ret) {
+ if (status != MB_ST_COMPLETE_SUCC)
+ return -EBUSY;
+ } else {
+ dev_err_ratelimited(hr_dev->dev,
+ "failed to check mbox status = 0x%x, ret = %d.\n",
+ status, ret);
}
- return 0;
+ return ret;
}
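/*
 * The reworked mailbox flow is two-phase: v2_post_mbox() spins on the
 * HW_RUN bit via v2_wait_mbox_complete() until the mailbox is idle and
 * then posts the message; completion is checked separately. A minimal
 * sketch of a hypothetical polling caller (names as used above):
 *
 *	ret = v2_post_mbox(hr_dev, in, out, mod, op_mod, op, token, 0);
 *	if (!ret)
 *		ret = v2_poll_mbox_done(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS);
 */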
static void copy_gid(void *dest, const union ib_gid *gid)
@@ -2790,7 +2976,7 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
int gid_index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
@@ -3079,14 +3265,31 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
!!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
-static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
+static inline void update_cq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
- *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
+ *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
+ } else {
+ struct hns_roce_v2_db cq_db = {};
+
+ roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+ hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_CQ_DB);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
+ V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
+ V2_CQ_DB_CMD_SN_S, 1);
+
+ hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
+ }
}
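/*
 * update_cq_db() has two delivery paths: with a record doorbell the
 * driver only stores the masked consumer index at *set_ci_db and the
 * hardware fetches it from memory; otherwise it builds an
 * hns_roce_v2_db carrying the CQN, the CQ_DB command and a command
 * sequence number and writes it to the doorbell register.
 */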
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
struct hns_roce_srq *srq)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
struct hns_roce_v2_cqe *cqe, *dest;
u32 prod_index;
int nfreed = 0;
@@ -3129,7 +3332,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
- hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ update_cq_db(hr_dev, hr_cq);
}
}
@@ -3224,37 +3427,33 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- u32 notification_flag;
- __le32 doorbell[2];
+ struct hns_roce_v2_db cq_db = {};
+ u32 notify_flag;
- doorbell[0] = 0;
- doorbell[1] = 0;
-
- notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
- V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
/*
- * flags = 0; Notification Flag = 1, next
- * flags = 1; Notification Flag = 0, solocited
+ * flags = 0, then notify_flag : next
+ * flags = 1, then notify_flag : solicited
*/
- roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
- hr_cq->cqn);
- roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
- HNS_ROCE_V2_CQ_DB_NTR);
- roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
- roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
- V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
- roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
- notification_flag);
-
- hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
+ notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+
+ roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S, hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+ HNS_ROCE_V2_CQ_DB_NOTIFY);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
+ V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
+ V2_CQ_DB_CMD_SN_S, hr_cq->arm_sn);
+ roce_set_bit(cq_db.parameter, V2_CQ_DB_NOTIFY_TYPE_S, notify_flag);
+
+ hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
return 0;
}
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
- struct hns_roce_qp **cur_qp,
- struct ib_wc *wc)
+ struct hns_roce_qp *qp,
+ struct ib_wc *wc)
{
struct hns_roce_rinl_sge *sge_list;
u32 wr_num, wr_cnt, sge_num;
@@ -3263,11 +3462,11 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
- wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+ wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
- sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
- sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
- wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
+ sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
data_len = wc->byte_len;
for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
@@ -3401,21 +3600,205 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
init_flush_work(hr_dev, qp);
}
+static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_qp *hr_qp = *cur_qp;
+ u32 qpn;
+
+ qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+ V2_CQE_BYTE_16_LCL_QPN_S) &
+ HNS_ROCE_V2_CQE_QPN_MASK;
+
+ if (!hr_qp || qpn != hr_qp->qpn) {
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ ibdev_err(&hr_dev->ib_dev,
+ "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, qpn);
+ return -EINVAL;
+ }
+ *cur_qp = hr_qp;
+ }
+
+ return 0;
+}
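+/*
+ * Caching the last QP in *cur_qp means a burst of CQEs for one QPN (the
+ * common case when a single connection dominates the CQ) costs a single
+ * __hns_roce_qp_lookup() instead of one lookup per CQE.
+ */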
+
+/*
+ * mapped-value = 1 + real-value
+ * The IB WC opcodes start from 0. In order to distinguish initialized map
+ * values from uninitialized ones, add 1 to the real value when defining the
+ * mapping, so that validity can be identified by checking whether the mapped
+ * value is greater than 0.
+ */
+#define HR_WC_OP_MAP(hr_key, ib_key) \
+ [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
+
+static const u32 wc_send_op_map[] = {
+ HR_WC_OP_MAP(SEND, SEND),
+ HR_WC_OP_MAP(SEND_WITH_INV, SEND),
+ HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
+ HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
+ HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
+ HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
+ HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
+ HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
+ HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
+ HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
+ HR_WC_OP_MAP(BIND_MW, REG_MR),
+};
+
+static int to_ib_wc_send_op(u32 hr_opcode)
+{
+ if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
+ return -EINVAL;
+
+ return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
+ -EINVAL;
+}
+
+static const u32 wc_recv_op_map[] = {
+ HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
+ HR_WC_OP_MAP(SEND, RECV),
+ HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
+ HR_WC_OP_MAP(SEND_WITH_INV, RECV),
+};
+
+static int to_ib_wc_recv_op(u32 hr_opcode)
+{
+ if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
+ return -EINVAL;
+
+ return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
+ -EINVAL;
+}
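+/*
+ * For example, wc_send_op_map[HNS_ROCE_V2_WQE_OP_RDMA_READ] holds
+ * 1 + IB_WC_RDMA_READ, so to_ib_wc_send_op() returns IB_WC_RDMA_READ,
+ * while a hole in either map (mapped value 0) or an out-of-range opcode
+ * yields -EINVAL and is surfaced to the user as IB_WC_GENERAL_ERR.
+ */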
+
+static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+{
+ u32 hr_opcode;
+ int ib_opcode;
+
+ wc->wc_flags = 0;
+
+ hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+ switch (hr_opcode) {
+ case HNS_ROCE_V2_WQE_OP_RDMA_READ:
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
+ case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
+ case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
+ wc->byte_len = 8;
+ break;
+ default:
+ break;
+ }
+
+ ib_opcode = to_ib_wc_send_op(hr_opcode);
+ if (ib_opcode < 0)
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->opcode = ib_opcode;
+}
+
+static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
+ struct hns_roce_v2_cqe *cqe)
+{
+ return wc->qp->qp_type != IB_QPT_UD &&
+ wc->qp->qp_type != IB_QPT_GSI &&
+ (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S);
+}
+
+static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+{
+ struct hns_roce_qp *qp = to_hr_qp(wc->qp);
+ u32 hr_opcode;
+ int ib_opcode;
+ int ret;
+
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+
+ hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+ switch (hr_opcode) {
+ case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
+ break;
+ default:
+ wc->wc_flags = 0;
+ }
+
+ ib_opcode = to_ib_wc_recv_op(hr_opcode);
+ if (ib_opcode < 0)
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->opcode = ib_opcode;
+
+ if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
+ ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ wc->sl = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
+ V2_CQE_BYTE_32_SL_S);
+ wc->src_qp = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M,
+ V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->slid = 0;
+ wc->wc_flags |= roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ?
+ IB_WC_GRH : 0;
+ wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M,
+ V2_CQE_BYTE_32_PORTN_S);
+ wc->pkey_index = 0;
+
+ if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+ wc->vlan_id = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M,
+ V2_CQE_BYTE_28_VID_S);
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+
+ wc->network_hdr_type = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_PORT_TYPE_M,
+ V2_CQE_BYTE_28_PORT_TYPE_S);
+
+ return 0;
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct hns_roce_qp *qp = *cur_qp;
struct hns_roce_srq *srq = NULL;
struct hns_roce_v2_cqe *cqe;
- struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
int is_send;
- u16 wqe_ctr;
- u32 opcode;
- u32 qpn;
+ u16 wqe_idx;
int ret;
- /* Find cqe according to consumer index */
cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
if (!cqe)
return -EAGAIN;
@@ -3424,189 +3807,50 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
/* Memory barrier */
rmb();
- /* 0->SQ, 1->RQ */
- is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
-
- qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
- V2_CQE_BYTE_16_LCL_QPN_S);
-
- if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (unlikely(!hr_qp)) {
- ibdev_err(&hr_dev->ib_dev,
- "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
- return -EINVAL;
- }
- *cur_qp = hr_qp;
- }
+ ret = get_cur_qp(hr_cq, cqe, &qp);
+ if (ret)
+ return ret;
- wc->qp = &(*cur_qp)->ibqp;
+ wc->qp = &qp->ibqp;
wc->vendor_err = 0;
+ wqe_idx = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S);
+
+ is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
if (is_send) {
- wq = &(*cur_qp)->sq;
- if ((*cur_qp)->sq_signal_bits) {
- /*
- * If sg_signal_bit is 1,
- * firstly tail pointer updated to wqe
- * which current cqe correspond to
- */
- wqe_ctr = (u16)roce_get_field(cqe->byte_4,
- V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S);
- wq->tail += (wqe_ctr - (u16)wq->tail) &
+ wq = &qp->sq;
+
+ /* If sq_signal_bits is set, the tail pointer is updated to
+ * the WQE corresponding to the current CQE.
+ */
+ if (qp->sq_signal_bits)
+ wq->tail += (wqe_idx - (u16)wq->tail) &
(wq->wqe_cnt - 1);
- }
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
- } else if ((*cur_qp)->ibqp.srq) {
- srq = to_hr_srq((*cur_qp)->ibqp.srq);
- wqe_ctr = (u16)roce_get_field(cqe->byte_4,
- V2_CQE_BYTE_4_WQE_INDX_M,
- V2_CQE_BYTE_4_WQE_INDX_S);
- wc->wr_id = srq->wrid[wqe_ctr];
- hns_roce_free_srq_wqe(srq, wqe_ctr);
- } else {
- /* Update tail pointer, record wr_id */
- wq = &(*cur_qp)->rq;
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- }
-
- get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
- if (unlikely(wc->status != IB_WC_SUCCESS))
- return 0;
- if (is_send) {
- wc->wc_flags = 0;
- /* SQ corresponding to CQE */
- switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
- V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
- case HNS_ROCE_V2_WQE_OP_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
- wc->opcode = IB_WC_SEND;
- wc->wc_flags |= IB_WC_WITH_IMM;
- break;
- case HNS_ROCE_V2_WQE_OP_RDMA_READ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- break;
- case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
- wc->opcode = IB_WC_RDMA_WRITE;
- wc->wc_flags |= IB_WC_WITH_IMM;
- break;
- case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
- wc->opcode = IB_WC_COMP_SWAP;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
- wc->opcode = IB_WC_FETCH_ADD;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
- wc->opcode = IB_WC_MASKED_COMP_SWAP;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
- wc->opcode = IB_WC_MASKED_FETCH_ADD;
- wc->byte_len = 8;
- break;
- case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
- wc->opcode = IB_WC_REG_MR;
- break;
- case HNS_ROCE_V2_WQE_OP_BIND_MW:
- wc->opcode = IB_WC_REG_MR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
+ fill_send_wc(wc, cqe);
} else {
- /* RQ correspond to CQE */
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
-
- opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
- V2_CQE_BYTE_4_OPCODE_S);
- switch (opcode & 0x1f) {
- case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immtdata));
- break;
- case HNS_ROCE_V2_OPCODE_SEND:
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
- break;
- case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immtdata));
- break;
- case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- if ((wc->qp->qp_type == IB_QPT_RC ||
- wc->qp->qp_type == IB_QPT_UC) &&
- (opcode == HNS_ROCE_V2_OPCODE_SEND ||
- opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
- opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
- (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
- ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
- if (unlikely(ret))
- return -EAGAIN;
- }
-
- wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
- V2_CQE_BYTE_32_SL_S);
- wc->src_qp = (u8)roce_get_field(cqe->byte_32,
- V2_CQE_BYTE_32_RMT_QPN_M,
- V2_CQE_BYTE_32_RMT_QPN_S);
- wc->slid = 0;
- wc->wc_flags |= (roce_get_bit(cqe->byte_32,
- V2_CQE_BYTE_32_GRH_S) ?
- IB_WC_GRH : 0);
- wc->port_num = roce_get_field(cqe->byte_32,
- V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
- wc->pkey_index = 0;
-
- if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
- wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_VID_M,
- V2_CQE_BYTE_28_VID_S);
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ if (qp->ibqp.srq) {
+ srq = to_hr_srq(qp->ibqp.srq);
+ wc->wr_id = srq->wrid[wqe_idx];
+ hns_roce_free_srq_wqe(srq, wqe_idx);
} else {
- wc->vlan_id = 0xffff;
+ wq = &qp->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
}
- wc->network_hdr_type = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_PORT_TYPE_M,
- V2_CQE_BYTE_28_PORT_TYPE_S);
+ ret = fill_recv_wc(wc, cqe);
}
- return 0;
+ get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ return 0;
+
+ return ret;
}
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
@@ -3638,7 +3882,7 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
}
if (npolled)
- hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ update_cq_db(hr_dev, hr_cq);
out:
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -3647,12 +3891,9 @@ out:
}
static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
- int step_idx)
+ int step_idx, u16 *mbox_op)
{
- int op;
-
- if (type == HEM_TYPE_SCCC && step_idx)
- return -EINVAL;
+ u16 op;
switch (type) {
case HEM_TYPE_QPC:
@@ -3677,51 +3918,49 @@ static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
break;
default:
- dev_warn(hr_dev->dev,
- "table %u not to be written by mailbox!\n", type);
+ dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
return -EINVAL;
}
- return op + step_idx;
+ *mbox_op = op + step_idx;
+
+ return 0;
}
-static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
- u32 hem_type, int step_idx)
+static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+ dma_addr_t base_addr)
{
- struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_cmq_desc desc;
- struct hns_roce_cfg_gmv_bt *gmv_bt =
- (struct hns_roce_cfg_gmv_bt *)desc.data;
- int ret;
- int op;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
+ u64 addr = to_hr_hw_page_addr(base_addr);
- if (hem_type == HEM_TYPE_GMV) {
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT,
- false);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
- gmv_bt->gmv_ba_l = cpu_to_le32(bt_ba >> HNS_HW_PAGE_SHIFT);
- gmv_bt->gmv_ba_h = cpu_to_le32(bt_ba >> (HNS_HW_PAGE_SHIFT +
- 32));
- gmv_bt->gmv_bt_idx = cpu_to_le32(obj /
- (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz));
+ hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
+ hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
- return hns_roce_cmq_send(hr_dev, &desc, 1);
- }
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
- op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
- if (op < 0)
- return 0;
+static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
+ dma_addr_t base_addr, u32 hem_type, int step_idx)
+{
+ int ret;
+ u16 op;
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (unlikely(hem_type == HEM_TYPE_GMV))
+ return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
- ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
- 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
+ return 0;
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
+ if (ret < 0)
+ return ret;
- return ret;
+ return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
}
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
@@ -3911,6 +4150,16 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
ilog2(hr_qp->rq.wqe_cnt));
}
+static inline int get_cqn(struct ib_cq *ib_cq)
+{
+ return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
+}
+
+static inline int get_pdn(struct ib_pd *ib_pd)
+{
+ return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
+}
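+/*
+ * XRC target QPs are created without send/recv CQs, and their PD may be
+ * absent at this level, so these helpers fold the NULL check into the
+ * QPC programming below instead of repeating it at every call site.
+ */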
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -3927,13 +4176,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
* 0 at the same time, else set them to 0x1.
*/
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
@@ -3944,6 +4193,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
+ if (ibqp->qp_type == IB_QPT_XRC_TGT) {
+ context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
+
+ roce_set_bit(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_XRC_QP_TYPE_S, 1);
+ }
+
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
@@ -3954,23 +4210,27 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
((u32)hr_qp->rdb.dma) >> 1);
context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
+ if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQIE_S,
+ !!(hr_dev->caps.flags &
+ HNS_ROCE_CAP_FLAG_RQ_INLINE));
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
+
if (ibqp->srq) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 1);
roce_set_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
to_hr_srq(ibqp->srq)->srqn);
- roce_set_bit(context->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 1);
}
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
return;
@@ -3993,22 +4253,23 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
* 0 at the same time, else set them to 0x1.
*/
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
+
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -4133,17 +4394,6 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
-
return 0;
}
@@ -4240,7 +4490,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
u64 *mtts;
u8 *dmac;
u8 *smac;
- int port;
+ u32 port;
int ret;
ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
@@ -4454,6 +4704,143 @@ static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
return rdma_flow_label_to_udp_sport(fl);
}
+static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ u32 *dip_idx)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_dip *hr_dip;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+
+ list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
+ if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
+ *dip_idx = hr_dip->dip_idx;
+ goto out;
+ }
+ }
+
+ /* If no dgid is found, a new dip and a mapping between dgid and
+ * dip_idx will be created.
+ */
+ hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
+ if (!hr_dip) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
+ list_add_tail(&hr_dip->node, &hr_dev->dip_list);
+
+out:
+ spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+ return ret;
+}
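+/*
+ * The DIP algorithm keys congestion state by destination, so every QP
+ * aimed at the same dgid must share one context index: the first QP to
+ * reach a destination donates its QP number as dip_idx, and later QPs
+ * to that dgid find the entry above and reuse it.
+ */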
+
+enum {
+ CONG_DCQCN,
+ CONG_WINDOW,
+};
+
+enum {
+ UNSUPPORT_CONG_LEVEL,
+ SUPPORT_CONG_LEVEL,
+};
+
+enum {
+ CONG_LDCP,
+ CONG_HC3,
+};
+
+enum {
+ DIP_INVALID,
+ DIP_VALID,
+};
+
+static int check_cong_type(struct ib_qp *ibqp,
+ struct hns_roce_congestion_algorithm *cong_alg)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+
+ /* different congestion types match different configurations */
+ switch (hr_dev->caps.cong_type) {
+ case CONG_TYPE_DCQCN:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_INVALID;
+ break;
+ case CONG_TYPE_LDCP:
+ cong_alg->alg_sel = CONG_WINDOW;
+ cong_alg->alg_sub_sel = CONG_LDCP;
+ cong_alg->dip_vld = DIP_INVALID;
+ break;
+ case CONG_TYPE_HC3:
+ cong_alg->alg_sel = CONG_WINDOW;
+ cong_alg->alg_sub_sel = CONG_HC3;
+ cong_alg->dip_vld = DIP_INVALID;
+ break;
+ case CONG_TYPE_DIP:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_VALID;
+ break;
+ default:
+ ibdev_err(&hr_dev->ib_dev,
+ "error type(%u) for congestion selection.\n",
+ hr_dev->caps.cong_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_congestion_algorithm cong_field;
+ struct ib_device *ibdev = ibqp->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ u32 dip_idx = 0;
+ int ret;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
+ return 0;
+
+ ret = check_cong_type(ibqp, &cong_field);
+ if (ret)
+ return ret;
+
+ hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
+ hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+ hr_reg_write(qpc_mask, QPC_CONG_ALGO_TMPL_ID, 0);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
+ hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SEL, 0);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
+ cong_field.alg_sub_sel);
+ hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL, 0);
+ hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
+ hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD, 0);
+
+ /* if dip is disabled, there is no need to set dip idx */
+ if (cong_field.dip_vld == 0)
+ return 0;
+
+ ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
+ if (ret) {
+ ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
+ hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
+
+ return 0;
+}
+
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4537,6 +4924,10 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+ ret = fill_cong_field(ibqp, attr, context, qpc_mask);
+ if (ret)
+ return ret;
+
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
@@ -4687,7 +5078,6 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
}
- /* RC&UC&UD required attr */
if (attr_mask & IB_QP_SQ_PSN) {
roce_set_field(context->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -4765,7 +5155,6 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
}
- /* RC&UC required attr */
if (attr_mask & IB_QP_RQ_PSN) {
roce_set_field(context->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
@@ -4808,6 +5197,29 @@ static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
}
}
+static void clear_qp(struct hns_roce_qp *hr_qp)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (ibqp->send_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
+ hr_qp->qpn, ibqp->srq ?
+ to_hr_srq(ibqp->srq) : NULL);
+
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+ *hr_qp->rdb.db_record = 0;
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->next_sge = 0;
+}
+
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
@@ -4842,19 +5254,23 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* When QP state is err, SQ and RQ WQE should be flushed */
if (new_state == IB_QPS_ERR) {
- spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
- hr_qp->state = IB_QPS_ERR;
- roce_set_field(context->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
- hr_qp->sq.head);
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
- spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
-
- if (!ibqp->srq) {
+ if (ibqp->qp_type != IB_QPT_XRC_TGT) {
+ spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ hr_qp->state = IB_QPS_ERR;
+ roce_set_field(context->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+ hr_qp->sq.head);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
+ }
+
+ if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC_INI &&
+ ibqp->qp_type != IB_QPT_XRC_TGT) {
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ hr_qp->state = IB_QPS_ERR;
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
@@ -4873,7 +5289,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
- ibqp->srq ? 1 : 0);
+ ((to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) ||
+ ibqp->srq) ? 1 : 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_INV_CREDIT_S, 0);
@@ -4894,21 +5311,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- hr_qp->next_sge = 0;
- if (hr_qp->rq.wqe_cnt)
- *hr_qp->rdb.db_record = 0;
- }
+ if (new_state == IB_QPS_RESET && !ibqp->uobject)
+ clear_qp(hr_qp);
out:
return ret;
@@ -5019,7 +5423,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
- hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
struct ib_global_route *grh =
rdma_ah_retrieve_grh(&qp_attr->ah_attr);
@@ -5051,6 +5456,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
V2_QPC_BYTE_140_RR_MAX_M,
V2_QPC_BYTE_140_RR_MAX_S);
+
qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
V2_QPC_BYTE_80_MIN_RNR_TIME_M,
V2_QPC_BYTE_80_MIN_RNR_TIME_S);
@@ -5068,6 +5474,7 @@ done:
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
@@ -5085,6 +5492,15 @@ out:
return ret;
}
+static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
+{
+ return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ hr_qp->state != IB_QPS_RESET);
+}
+
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_udata *udata)
@@ -5094,9 +5510,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
unsigned long flags;
int ret = 0;
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
- hr_qp->ibqp.qp_type == IB_QPT_UD) &&
- hr_qp->state != IB_QPS_RESET) {
+ if (modify_qp_is_ok(hr_qp)) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
@@ -5275,9 +5689,11 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
}
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+ hr_reg_write(ctx, SRQC_SRQ_TYPE,
+ !!(srq->ibsrq.srq_type == IB_SRQT_XRC));
hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
- hr_reg_write(ctx, SRQC_XRCD, 0);
+ hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
hr_reg_write(ctx, SRQC_RQWS,
@@ -5481,6 +5897,12 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
case HNS_ROCE_EVENT_TYPE_FLR:
ibdev_warn(ibdev, "Function level reset.\n");
break;
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ ibdev_err(ibdev, "xrc domain violation error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+ ibdev_err(ibdev, "invalid xrceth error.\n");
+ break;
default:
break;
}
@@ -5505,33 +5927,30 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
queue_work(hr_dev->irq_workq, &(irq_work->work));
}
-static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+static void update_eq_db(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- __le32 doorbell[2] = {};
+ struct hns_roce_v2_db eq_db = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
- roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
- HNS_ROCE_V2_EQ_DB_CMD_S,
+ roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
HNS_ROCE_EQ_DB_CMD_AEQ :
HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
} else {
- roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
- HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+ roce_set_field(eq_db.byte_4, V2_EQ_DB_TAG_M, V2_EQ_DB_TAG_S,
+ eq->eqn);
- roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
- HNS_ROCE_V2_EQ_DB_CMD_S,
+ roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
HNS_ROCE_EQ_DB_CMD_CEQ :
HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
}
- roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
- HNS_ROCE_V2_EQ_DB_PARA_S,
- (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+ roce_set_field(eq_db.parameter, V2_EQ_DB_CONS_IDX_M,
+ V2_EQ_DB_CONS_IDX_S, eq->cons_index);
- hns_roce_write64(hr_dev, doorbell, eq->doorbell);
+ hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
@@ -5581,6 +6000,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
hns_roce_qp_event(hr_dev, queue_num, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
@@ -5616,7 +6037,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
aeqe = next_aeqe_sw_v2(eq);
}
- set_eq_cons_index_v2(eq);
+ update_eq_db(eq);
return aeqe_found;
}
@@ -5656,7 +6077,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
ceqe = next_ceqe_sw_v2(eq);
}
- set_eq_cons_index_v2(eq);
+ update_eq_db(eq);
return ceqe_found;
}
@@ -5710,58 +6131,34 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
- dev_err(dev, "BUS ERR!\n");
-
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
-
- int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
-
- int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
- dev_err(dev, "OTHER ERR!\n");
+ } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
+ dev_err(dev, "RAS interrupt!\n");
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
+ int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
int_work = 1;
- } else
+ } else {
dev_err(dev, "There is no abnormal irq found!\n");
+ }
return IRQ_RETVAL(int_work);
}
static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
- int eq_num, int enable_flag)
+ int eq_num, u32 enable_flag)
{
int i;
- if (enable_flag == EQ_ENABLE) {
- for (i = 0; i < eq_num; i++)
- roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
- i * EQ_REG_OFFSET,
- HNS_ROCE_V2_VF_EVENT_INT_EN_M);
-
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
- HNS_ROCE_V2_VF_ABN_INT_EN_M);
- roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
- HNS_ROCE_V2_VF_ABN_INT_CFG_M);
- } else {
- for (i = 0; i < eq_num; i++)
- roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
- i * EQ_REG_OFFSET,
- HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET, enable_flag);
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
- HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
- roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
- HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
- }
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}
static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
@@ -5786,6 +6183,16 @@ static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}
+static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->shift = ilog2((unsigned int)eq->entries);
+}
+
static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
void *mb_buf)
{
@@ -5797,13 +6204,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
- /* init eqc */
- eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
- eq->cons_index = 0;
- eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
- eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
- eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
- eq->shift = ilog2((unsigned int)eq->entries);
+ init_eq_config(hr_dev, eq);
/* if not multi-hop, eqe buffer only use one trunk */
count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
@@ -5813,102 +6214,34 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
return -ENOBUFS;
}
- /* set eqc state */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
- HNS_ROCE_V2_EQ_STATE_VALID);
-
- /* set eqe hop num */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
- HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
-
- /* set eqc over_ignore */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
- HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
-
- /* set eqc coalesce */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
- HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
-
- /* set eqc arm_state */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
- HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
-
- /* set eqn */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
- eq->eqn);
-
- /* set eqe_cnt */
- roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
- HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
-
- /* set eqe_ba_pg_sz */
- roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
- HNS_ROCE_EQC_BA_PG_SZ_S,
- to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
-
- /* set eqe_buf_pg_sz */
- roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
- HNS_ROCE_EQC_BUF_PG_SZ_S,
- to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
-
- /* set eq_producer_idx */
- roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
- HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
-
- /* set eq_max_cnt */
- roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
- HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
-
- /* set eq_period */
- roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
- HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
-
- /* set eqe_report_timer */
- roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
- HNS_ROCE_EQC_REPORT_TIMER_S,
- HNS_ROCE_EQ_INIT_REPORT_TIMER);
-
- /* set bt_ba [34:3] */
- roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
- HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
-
- /* set bt_ba [64:35] */
- roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
- HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
-
- /* set eq shift */
- roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
- eq->shift);
-
- /* set eq MSI_IDX */
- roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
- HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
-
- /* set cur_eqe_ba [27:12] */
- roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
- HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
-
- /* set cur_eqe_ba [59:28] */
- roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
- HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
-
- /* set cur_eqe_ba [63:60] */
- roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
- HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
-
- /* set eq consumer idx */
- roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
- HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
-
- roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
- HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
-
- roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
- HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
-
- roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
- HNS_ROCE_EQC_EQE_SIZE_S,
- eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
+ hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
+ hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
+ hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
+ hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
+ hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
+ hr_reg_write(eqc, EQC_EQN, eq->eqn);
+ hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
+ hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
+ hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
+ hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
+
+ hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
+ hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
+ hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
+ hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
+ hr_reg_write(eqc, EQC_SHIFT, eq->shift);
+ hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
+ hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
+ hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
+ hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
+ hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
+ hr_reg_write(eqc, EQC_EQE_SIZE,
+ !!(eq->eqe_size == HNS_ROCE_V3_EQE_SIZE));
return 0;
}
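The rewritten config_eqc() above swaps dozens of roce_set_field() mask/shift pairs for hr_reg_write() calls driven by FIELD_LOC(high, low) descriptors. Below is a minimal, self-contained sketch of how such a descriptor-based writer can work; the names are illustrative and this is not the driver's actual implementation (the real macros additionally handle little-endian __le32 words).

	#include <stdint.h>
	#include <stdio.h>

	struct demo_ctx {
		uint32_t data[16];	/* context image written to hardware */
	};

	/* Encode a field as its absolute (high, low) bit positions in the
	 * struct; every field here is assumed to stay within one 32-bit
	 * word, as the EQC fields in this patch do. */
	#define DEMO_FIELD_LOC(h, l)	(((uint64_t)(h) << 32) | (l))
	#define DEMO_EQ_ST		DEMO_FIELD_LOC(1, 0)
	#define DEMO_EQN		DEMO_FIELD_LOC(15, 8)

	static void demo_reg_write(struct demo_ctx *ctx, uint64_t field,
				   uint32_t val)
	{
		uint32_t h = field >> 32, l = (uint32_t)field;
		uint32_t word = l / 32, shift = l % 32;
		uint32_t mask = (uint32_t)
			((((uint64_t)1 << (h - l + 1)) - 1) << shift);

		ctx->data[word] = (ctx->data[word] & ~mask) |
				  ((val << shift) & mask);
	}

	int main(void)
	{
		struct demo_ctx ctx = { { 0 } };

		demo_reg_write(&ctx, DEMO_EQ_ST, 2);	/* e.g. "valid" */
		demo_reg_write(&ctx, DEMO_EQN, 5);
		printf("word0 = 0x%08x\n", ctx.data[0]);	/* 0x00000502 */
		return 0;
	}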
@@ -6166,6 +6499,7 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
__hns_roce_free_irq(hr_dev);
+ destroy_workqueue(hr_dev->irq_workq);
for (i = 0; i < eq_num; i++) {
hns_roce_v2_destroy_eqc(hr_dev, i);
@@ -6174,9 +6508,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
}
kfree(eq_table->eq);
-
- flush_workqueue(hr_dev->irq_workq);
- destroy_workqueue(hr_dev->irq_workq);
}
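The reordering above works because destroy_workqueue() drains any still-pending work itself, which makes the separate flush_workqueue() call redundant; moving the destruction before the per-EQ teardown also guarantees no deferred handler runs against an EQ that has already been freed. A compact restatement of the ordering, under the same assumptions as the hunk:

	/* Teardown order: quiesce interrupt sources, stop deferred work,
	 * then free what the work used to touch. */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);	/* drains pending work itself */
	/* only now: hns_roce_v2_destroy_eqc() / free_eq_buf() / kfree(eq) */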
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
@@ -6205,9 +6536,9 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.hw_profile = hns_roce_v2_profile,
.hw_init = hns_roce_v2_init,
.hw_exit = hns_roce_v2_exit,
- .post_mbox = hns_roce_v2_post_mbox,
- .chk_mbox = hns_roce_v2_chk_mbox,
- .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
+ .post_mbox = v2_post_mbox,
+ .poll_mbox_done = v2_poll_mbox_done,
+ .chk_mbox_avail = v2_chk_mbox_is_avail,
.set_gid = hns_roce_v2_set_gid,
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
@@ -6218,20 +6549,10 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
.modify_qp = hns_roce_v2_modify_qp,
- .query_qp = hns_roce_v2_query_qp,
- .destroy_qp = hns_roce_v2_destroy_qp,
.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
- .modify_cq = hns_roce_v2_modify_cq,
- .post_send = hns_roce_v2_post_send,
- .post_recv = hns_roce_v2_post_recv,
- .req_notify_cq = hns_roce_v2_req_notify_cq,
- .poll_cq = hns_roce_v2_poll_cq,
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
.write_srqc = hns_roce_v2_write_srqc,
- .modify_srq = hns_roce_v2_modify_srq,
- .query_srq = hns_roce_v2_query_srq,
- .post_srq_recv = hns_roce_v2_post_srq_recv,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6243,6 +6564,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -6253,9 +6576,12 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
int i;
hr_dev->pci_dev = handle->pdev;
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+ hr_dev->is_vf = id->driver_data;
hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
@@ -6272,7 +6598,7 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
hr_dev->iboe.netdevs[0]->dev_addr);
- for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+ for (i = 0; i < handle->rinfo.num_vectors; i++)
hr_dev->irq[i] = pci_irq_vector(handle->pdev,
i + handle->rinfo.base_vector);
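The new is_vf detection leans on pci_match_id(), which walks a pci_device_id table and returns the matching entry, so per-device flags can be stashed in .driver_data. A minimal sketch of the pattern follows; the device IDs and flag name are illustrative, not the driver's actual values:

	#include <linux/pci.h>

	#define MY_DEV_IS_VF	1UL

	static const struct pci_device_id my_pci_tbl[] = {
		{ PCI_VDEVICE(HUAWEI, 0xa222), 0 },		/* PF */
		{ PCI_VDEVICE(HUAWEI, 0xa22f), MY_DEV_IS_VF },	/* VF */
		{ 0, }
	};

	static bool my_dev_is_vf(struct pci_dev *pdev)
	{
		const struct pci_device_id *id;

		id = pci_match_id(my_pci_tbl, pdev);
		return id && (id->driver_data & MY_DEV_IS_VF);
	}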
@@ -6356,6 +6682,9 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
if (!id)
return 0;
+ if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+ return 0;
+
ret = __hns_roce_hw_v2_init_instance(handle);
if (ret) {
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 39621fb6ec16..a2100a629859 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -40,13 +40,11 @@
#define HNS_ROCE_VF_SRQC_BT_NUM 64
#define HNS_ROCE_VF_CQC_BT_NUM 64
#define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_EQC_NUM 64
#define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SGID_NUM 32
#define HNS_ROCE_VF_SL_NUM 8
#define HNS_ROCE_VF_GMV_BT_NUM 256
-#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ 0x100000
@@ -61,6 +59,7 @@
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_MAX_SQ_INL_EXT 0x400
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -74,6 +73,8 @@
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
+#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
+#define HNS_ROCE_V2_RSV_XRCD_NUM 0
#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
@@ -121,7 +122,7 @@
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
-#define HNS_ROCE_V2_GID_INDEX_NUM 256
+#define HNS_ROCE_V2_GID_INDEX_NUM 16
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -143,6 +144,8 @@
#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
+#define HNS_ROCE_CONG_SIZE 64
+
#define check_whether_last_step(hop_num, step_idx) \
((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
(step_idx == 1 && hop_num == 1) || \
@@ -195,11 +198,11 @@ enum {
};
enum {
- HNS_ROCE_V2_SQ_DB = 0x0,
- HNS_ROCE_V2_RQ_DB = 0x1,
- HNS_ROCE_V2_SRQ_DB = 0x2,
- HNS_ROCE_V2_CQ_DB_PTR = 0x3,
- HNS_ROCE_V2_CQ_DB_NTR = 0x4,
+ HNS_ROCE_V2_SQ_DB,
+ HNS_ROCE_V2_RQ_DB,
+ HNS_ROCE_V2_SRQ_DB,
+ HNS_ROCE_V2_CQ_DB,
+ HNS_ROCE_V2_CQ_DB_NOTIFY
};
enum {
@@ -233,6 +236,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407,
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409,
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
@@ -244,6 +248,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CLR_SCCC = 0x8509,
HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
@@ -255,10 +260,20 @@ enum {
};
enum hns_roce_cmd_return_status {
- CMD_EXEC_SUCCESS = 0,
- CMD_NO_AUTH = 1,
- CMD_NOT_EXEC = 2,
- CMD_QUEUE_FULL = 3,
+ CMD_EXEC_SUCCESS,
+ CMD_NO_AUTH,
+ CMD_NOT_EXIST,
+ CMD_CRQ_FULL,
+ CMD_NEXT_ERR,
+ CMD_NOT_EXEC,
+ CMD_PARA_ERR,
+ CMD_RESULT_ERR,
+ CMD_TIMEOUT,
+ CMD_HILINK_ERR,
+ CMD_INFO_ILLEGAL,
+ CMD_INVALID,
+ CMD_ROH_CHECK_FAIL,
+ CMD_OTHER_ERR = 0xff
};
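A consumer of the widened return-status enum above typically folds the firmware status into a standard errno before returning to callers. The mapping below is a hypothetical sketch of that step, not the driver's actual table:

	#include <linux/errno.h>

	static int cmd_status_to_errno(enum hns_roce_cmd_return_status status)
	{
		switch (status) {
		case CMD_EXEC_SUCCESS:
			return 0;
		case CMD_NO_AUTH:
			return -EPERM;
		case CMD_NOT_EXIST:
			return -EOPNOTSUPP;
		case CMD_CRQ_FULL:
			return -EXFULL;
		case CMD_TIMEOUT:
			return -ETIME;
		default:
			return -EIO;	/* any other hardware-side failure */
		}
	}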
enum hns_roce_sgid_type {
@@ -399,7 +414,8 @@ struct hns_roce_srq_context {
#define SRQC_CONSUMER_IDX SRQC_FIELD_LOC(127, 112)
#define SRQC_WQE_BT_BA_L SRQC_FIELD_LOC(159, 128)
#define SRQC_WQE_BT_BA_H SRQC_FIELD_LOC(188, 160)
-#define SRQC_RSV2 SRQC_FIELD_LOC(191, 189)
+#define SRQC_RSV2 SRQC_FIELD_LOC(190, 189)
+#define SRQC_SRQ_TYPE SRQC_FIELD_LOC(191, 191)
#define SRQC_PD SRQC_FIELD_LOC(215, 192)
#define SRQC_RQWS SRQC_FIELD_LOC(219, 216)
#define SRQC_RSV3 SRQC_FIELD_LOC(223, 220)
@@ -572,6 +588,10 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_qp_context_ex ext;
};
+#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
+
+#define QPC_CONG_ALGO_TMPL_ID QPC_FIELD_LOC(455, 448)
+
#define V2_QPC_BYTE_4_TST_S 0
#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
@@ -663,9 +683,6 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
-#define V2_QPC_BYTE_60_TEMPID_S 0
-#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)
-
#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
@@ -698,6 +715,8 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
+#define V2_QPC_BYTE_80_XRC_QP_TYPE_S 24
+
#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
@@ -940,6 +959,10 @@ struct hns_roce_v2_qp_context {
#define QPCEX_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context_ex, h, l)
+#define QPCEX_CONG_ALG_SEL QPCEX_FIELD_LOC(0, 0)
+#define QPCEX_CONG_ALG_SUB_SEL QPCEX_FIELD_LOC(1, 1)
+#define QPCEX_DIP_CTX_IDX_VLD QPCEX_FIELD_LOC(2, 2)
+#define QPCEX_DIP_CTX_IDX QPCEX_FIELD_LOC(22, 3)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
#define V2_QP_RWE_S 1 /* rdma write enable */
@@ -1130,33 +1153,27 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
-#define V2_DB_BYTE_4_TAG_S 0
-#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
+#define V2_DB_TAG_S 0
+#define V2_DB_TAG_M GENMASK(23, 0)
-#define V2_DB_BYTE_4_CMD_S 24
-#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_DB_CMD_S 24
+#define V2_DB_CMD_M GENMASK(27, 24)
#define V2_DB_FLAG_S 31
-#define V2_DB_PARAMETER_IDX_S 0
-#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
+#define V2_DB_PRODUCER_IDX_S 0
+#define V2_DB_PRODUCER_IDX_M GENMASK(15, 0)
-#define V2_DB_PARAMETER_SL_S 16
-#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
+#define V2_DB_SL_S 16
+#define V2_DB_SL_M GENMASK(18, 16)
-#define V2_CQ_DB_BYTE_4_TAG_S 0
-#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
+#define V2_CQ_DB_CONS_IDX_S 0
+#define V2_CQ_DB_CONS_IDX_M GENMASK(23, 0)
-#define V2_CQ_DB_BYTE_4_CMD_S 24
-#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_CQ_DB_NOTIFY_TYPE_S 24
-#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
-#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
-
-#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
-#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
-
-#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+#define V2_CQ_DB_CMD_SN_S 25
+#define V2_CQ_DB_CMD_SN_M GENMASK(26, 25)
struct hns_roce_v2_ud_send_wqe {
__le32 byte_4;
@@ -1359,194 +1376,44 @@ struct hns_roce_cfg_llm_b {
#define CFG_LLM_TAIL_PTR_S 0
#define CFG_LLM_TAIL_PTR_M GENMASK(11, 0)
-struct hns_roce_cfg_global_param {
- __le32 time_cfg_udp_port;
- __le32 rsv[5];
-};
-
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
-
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
-
-struct hns_roce_pf_res_a {
- __le32 rsv;
- __le32 qpc_bt_idx_num;
- __le32 srqc_bt_idx_num;
- __le32 cqc_bt_idx_num;
- __le32 mpt_bt_idx_num;
- __le32 eqc_bt_idx_num;
-};
-
-#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
-#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
-
-#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
-#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
-
-#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
-#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
-#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
-#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
-#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
-#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
-#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
-#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
-#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
-
-struct hns_roce_pf_res_b {
- __le32 rsv0;
- __le32 smac_idx_num;
- __le32 sgid_idx_num;
- __le32 qid_idx_sl_num;
- __le32 sccc_bt_idx_num;
- __le32 gmv_idx_num;
-};
-
-#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
-#define PF_RES_DATA_1_PF_SMAC_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_1_PF_SMAC_NUM_S 8
-#define PF_RES_DATA_1_PF_SMAC_NUM_M GENMASK(16, 8)
-
-#define PF_RES_DATA_2_PF_SGID_IDX_S 0
-#define PF_RES_DATA_2_PF_SGID_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_2_PF_SGID_NUM_S 8
-#define PF_RES_DATA_2_PF_SGID_NUM_M GENMASK(16, 8)
-
-#define PF_RES_DATA_3_PF_QID_IDX_S 0
-#define PF_RES_DATA_3_PF_QID_IDX_M GENMASK(9, 0)
-
-#define PF_RES_DATA_3_PF_SL_NUM_S 16
-#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
-
-#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
-#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
-#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
-
-#define PF_RES_DATA_5_PF_GMV_BT_IDX_S 0
-#define PF_RES_DATA_5_PF_GMV_BT_IDX_M GENMASK(7, 0)
+/* Fields of HNS_ROCE_OPC_CFG_GLOBAL_PARAM */
+#define CFG_GLOBAL_PARAM_1US_CYCLES CMQ_REQ_FIELD_LOC(9, 0)
+#define CFG_GLOBAL_PARAM_UDP_PORT CMQ_REQ_FIELD_LOC(31, 16)
-#define PF_RES_DATA_5_PF_GMV_BT_NUM_S 8
-#define PF_RES_DATA_5_PF_GMV_BT_NUM_M GENMASK(16, 8)
-
-struct hns_roce_pf_timer_res_a {
- __le32 rsv0;
- __le32 qpc_timer_bt_idx_num;
- __le32 cqc_timer_bt_idx_num;
- __le32 rsv[3];
-};
-
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
-
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
-
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
-
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
-
-struct hns_roce_vf_res_a {
- __le32 vf_id;
- __le32 vf_qpc_bt_idx_num;
- __le32 vf_srqc_bt_idx_num;
- __le32 vf_cqc_bt_idx_num;
- __le32 vf_mpt_bt_idx_num;
- __le32 vf_eqc_bt_idx_num;
-};
-
-#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
-#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
-
-#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
-#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
-
-#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
-#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
-#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
-#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
-#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
-#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
-#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
-#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
-#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
-
-struct hns_roce_vf_res_b {
- __le32 rsv0;
- __le32 vf_smac_idx_num;
- __le32 vf_sgid_idx_num;
- __le32 vf_qid_idx_sl_num;
- __le32 vf_sccc_idx_num;
- __le32 vf_gmv_idx_num;
-};
-
-#define VF_RES_B_DATA_0_VF_ID_S 0
-#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
-#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
-#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
-
-#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
-#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
-#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
-
-#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
-#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
-
-#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
-#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
-
-#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
-#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
-#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
-
-#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_S 0
-#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_S 16
-#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_M GENMASK(24, 16)
+/*
+ * Fields of HNS_ROCE_OPC_QUERY_PF_RES, HNS_ROCE_OPC_QUERY_VF_RES
+ * and HNS_ROCE_OPC_ALLOC_VF_RES
+ */
+#define FUNC_RES_A_VF_ID CMQ_REQ_FIELD_LOC(7, 0)
+#define FUNC_RES_A_QPC_BT_IDX CMQ_REQ_FIELD_LOC(42, 32)
+#define FUNC_RES_A_QPC_BT_NUM CMQ_REQ_FIELD_LOC(59, 48)
+#define FUNC_RES_A_SRQC_BT_IDX CMQ_REQ_FIELD_LOC(72, 64)
+#define FUNC_RES_A_SRQC_BT_NUM CMQ_REQ_FIELD_LOC(89, 80)
+#define FUNC_RES_A_CQC_BT_IDX CMQ_REQ_FIELD_LOC(104, 96)
+#define FUNC_RES_A_CQC_BT_NUM CMQ_REQ_FIELD_LOC(121, 112)
+#define FUNC_RES_A_MPT_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_A_MPT_BT_NUM CMQ_REQ_FIELD_LOC(153, 144)
+#define FUNC_RES_A_EQC_BT_IDX CMQ_REQ_FIELD_LOC(168, 160)
+#define FUNC_RES_A_EQC_BT_NUM CMQ_REQ_FIELD_LOC(185, 176)
+#define FUNC_RES_B_SMAC_IDX CMQ_REQ_FIELD_LOC(39, 32)
+#define FUNC_RES_B_SMAC_NUM CMQ_REQ_FIELD_LOC(48, 40)
+#define FUNC_RES_B_SGID_IDX CMQ_REQ_FIELD_LOC(71, 64)
+#define FUNC_RES_B_SGID_NUM CMQ_REQ_FIELD_LOC(80, 72)
+#define FUNC_RES_B_QID_IDX CMQ_REQ_FIELD_LOC(105, 96)
+#define FUNC_RES_B_QID_NUM CMQ_REQ_FIELD_LOC(122, 112)
+#define FUNC_RES_V_QID_NUM CMQ_REQ_FIELD_LOC(115, 112)
+
+#define FUNC_RES_B_SCCC_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_B_SCCC_BT_NUM CMQ_REQ_FIELD_LOC(145, 137)
+#define FUNC_RES_B_GMV_BT_IDX CMQ_REQ_FIELD_LOC(167, 160)
+#define FUNC_RES_B_GMV_BT_NUM CMQ_REQ_FIELD_LOC(176, 168)
+#define FUNC_RES_V_GMV_BT_NUM CMQ_REQ_FIELD_LOC(184, 176)
+
+/* Fields of HNS_ROCE_OPC_QUERY_PF_TIMER_RES */
+#define PF_TIMER_RES_QPC_ITEM_IDX CMQ_REQ_FIELD_LOC(43, 32)
+#define PF_TIMER_RES_QPC_ITEM_NUM CMQ_REQ_FIELD_LOC(60, 48)
+#define PF_TIMER_RES_CQC_ITEM_IDX CMQ_REQ_FIELD_LOC(74, 64)
+#define PF_TIMER_RES_CQC_ITEM_NUM CMQ_REQ_FIELD_LOC(91, 80)
struct hns_roce_vf_switch {
__le32 rocee_sel;
@@ -1578,59 +1445,43 @@ struct hns_roce_mbox_status {
__le32 rsv[5];
};
-struct hns_roce_cfg_bt_attr {
- __le32 vf_qpc_cfg;
- __le32 vf_srqc_cfg;
- __le32 vf_cqc_cfg;
- __le32 vf_mpt_cfg;
- __le32 vf_sccc_cfg;
- __le32 rsv;
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+
+#define MB_ST_HW_RUN_M BIT(31)
+#define MB_ST_COMPLETE_M GENMASK(7, 0)
+
+#define MB_ST_COMPLETE_SUCC 1
+
+/* Fields of HNS_ROCE_OPC_CFG_BT_ATTR */
+#define CFG_BT_ATTR_QPC_BA_PGSZ CMQ_REQ_FIELD_LOC(3, 0)
+#define CFG_BT_ATTR_QPC_BUF_PGSZ CMQ_REQ_FIELD_LOC(7, 4)
+#define CFG_BT_ATTR_QPC_HOPNUM CMQ_REQ_FIELD_LOC(9, 8)
+#define CFG_BT_ATTR_SRQC_BA_PGSZ CMQ_REQ_FIELD_LOC(35, 32)
+#define CFG_BT_ATTR_SRQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(39, 36)
+#define CFG_BT_ATTR_SRQC_HOPNUM CMQ_REQ_FIELD_LOC(41, 40)
+#define CFG_BT_ATTR_CQC_BA_PGSZ CMQ_REQ_FIELD_LOC(67, 64)
+#define CFG_BT_ATTR_CQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(71, 68)
+#define CFG_BT_ATTR_CQC_HOPNUM CMQ_REQ_FIELD_LOC(73, 72)
+#define CFG_BT_ATTR_MPT_BA_PGSZ CMQ_REQ_FIELD_LOC(99, 96)
+#define CFG_BT_ATTR_MPT_BUF_PGSZ CMQ_REQ_FIELD_LOC(103, 100)
+#define CFG_BT_ATTR_MPT_HOPNUM CMQ_REQ_FIELD_LOC(105, 104)
+#define CFG_BT_ATTR_SCCC_BA_PGSZ CMQ_REQ_FIELD_LOC(131, 128)
+#define CFG_BT_ATTR_SCCC_BUF_PGSZ CMQ_REQ_FIELD_LOC(135, 132)
+#define CFG_BT_ATTR_SCCC_HOPNUM CMQ_REQ_FIELD_LOC(137, 136)
+
+/* Fields of HNS_ROCE_OPC_CFG_ENTRY_SIZE */
+#define CFG_HEM_ENTRY_SIZE_TYPE CMQ_REQ_FIELD_LOC(31, 0)
+enum {
+ HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+ HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
};
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
+#define CFG_HEM_ENTRY_SIZE_VALUE CMQ_REQ_FIELD_LOC(191, 160)
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
+/* Fields of HNS_ROCE_OPC_CFG_GMV_BT */
+#define CFG_GMV_BT_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
+#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
struct hns_roce_cfg_sgid_tb {
__le32 table_idx_rsv;
@@ -1641,17 +1492,6 @@ struct hns_roce_cfg_sgid_tb {
__le32 vf_sgid_type_rsv;
};
-enum {
- HNS_ROCE_CFG_QPC_SIZE = BIT(0),
- HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
-};
-
-struct hns_roce_cfg_entry_size {
- __le32 type;
- __le32 rsv[4];
- __le32 size;
-};
-
#define CFG_SGID_TB_TABLE_IDX_S 0
#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
@@ -1670,16 +1510,6 @@ struct hns_roce_cfg_smac_tb {
#define CFG_SMAC_TB_VF_SMAC_H_S 0
#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
-struct hns_roce_cfg_gmv_bt {
- __le32 gmv_ba_l;
- __le32 gmv_ba_h;
- __le32 gmv_bt_idx;
- __le32 rsv[3];
-};
-
-#define CFG_GMV_BA_H_S 0
-#define CFG_GMV_BA_H_M GENMASK(19, 0)
-
struct hns_roce_cfg_gmv_tb_a {
__le32 vf_sgid_l;
__le32 vf_sgid_ml;
@@ -1805,6 +1635,14 @@ struct hns_roce_query_pf_caps_d {
#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24
#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24)
+#define V2_QUERY_PF_CAPS_D_CONG_TYPE_S 26
+#define V2_QUERY_PF_CAPS_D_CONG_TYPE_M GENMASK(29, 26)
+
+struct hns_roce_congestion_algorithm {
+ u8 alg_sel;
+ u8 alg_sub_sel;
+ u8 dip_vld;
+};
#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0)
@@ -1859,18 +1697,27 @@ struct hns_roce_query_pf_caps_e {
#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0
#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0)
+struct hns_roce_cmq_req {
+ __le32 data[6];
+};
+
+#define CMQ_REQ_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cmq_req, h, l)
+
struct hns_roce_cmq_desc {
__le16 opcode;
__le16 flag;
__le16 retval;
__le16 rsv;
- __le32 data[6];
-};
-
-#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+ union {
+ __le32 data[6];
+ struct {
+ __le32 own_func_num;
+ __le32 own_mac_id;
+ __le32 rsv[4];
+ } func_info;
+ };
-#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
-#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
+};
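Embedding an anonymous union in the descriptor gives typed views over the same six data words: command-specific replies (func_info here) can be read by field name while generic code keeps using data[]. An illustrative, standalone sketch of the aliasing (plain uint32_t instead of __le32):

	#include <stdint.h>
	#include <stdio.h>

	struct demo_desc {
		uint16_t opcode;
		uint16_t flag;
		uint16_t retval;
		uint16_t rsv;
		union {
			uint32_t data[6];	/* generic view */
			struct {		/* typed view for one command */
				uint32_t own_func_num;
				uint32_t own_mac_id;
				uint32_t rsv[4];
			} func_info;
		};
	};

	int main(void)
	{
		struct demo_desc d = { .data = { 4, 1 } };

		/* Same bytes, two names: data[0] aliases own_func_num. */
		printf("funcs=%u mac=%u\n", d.func_info.own_func_num,
		       d.func_info.own_mac_id);
		return 0;
	}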
struct hns_roce_v2_cmq_ring {
dma_addr_t desc_dma_addr;
@@ -1932,6 +1779,12 @@ struct hns_roce_eq_context {
__le32 rsv[5];
};
+struct hns_roce_dip {
+ u8 dgid[GID_LEN_V2];
+ u8 dip_idx;
+ struct list_head node; /* all dips are on a list */
+};
+
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
@@ -1966,8 +1819,7 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
-#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
-#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1
#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
@@ -1982,96 +1834,38 @@ struct hns_roce_eq_context {
#define HNS_ROCE_INT_NAME_LEN 32
#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
-#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
-
#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
-/* WORD0 */
-#define HNS_ROCE_EQC_EQ_ST_S 0
-#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
-
-#define HNS_ROCE_EQC_HOP_NUM_S 2
-#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
-
-#define HNS_ROCE_EQC_OVER_IGNORE_S 4
-#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
-
-#define HNS_ROCE_EQC_COALESCE_S 5
-#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
-
-#define HNS_ROCE_EQC_ARM_ST_S 6
-#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
-
-#define HNS_ROCE_EQC_EQN_S 8
-#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
-
-#define HNS_ROCE_EQC_EQE_CNT_S 16
-#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
-
-/* WORD1 */
-#define HNS_ROCE_EQC_BA_PG_SZ_S 0
-#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
-
-#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
-#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
-
-#define HNS_ROCE_EQC_PROD_INDX_S 8
-#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
-
-/* WORD2 */
-#define HNS_ROCE_EQC_MAX_CNT_S 0
-#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
-
-#define HNS_ROCE_EQC_PERIOD_S 16
-#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
-
-/* WORD3 */
-#define HNS_ROCE_EQC_REPORT_TIMER_S 0
-#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
-
-/* WORD4 */
-#define HNS_ROCE_EQC_EQE_BA_L_S 0
-#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
-
-/* WORD5 */
-#define HNS_ROCE_EQC_EQE_BA_H_S 0
-#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
-
-/* WORD6 */
-#define HNS_ROCE_EQC_SHIFT_S 0
-#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
-
-#define HNS_ROCE_EQC_MSI_INDX_S 8
-#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
-
-#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
-#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
-
-/* WORD7 */
-#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
-#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
-
-/* WORD8 */
-#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
-#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
-
-#define HNS_ROCE_EQC_CONS_INDX_S 8
-#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
-
-/* WORD9 */
-#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
-#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
-
-/* WORD10 */
-#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
-#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
-
-#define HNS_ROCE_EQC_EQE_SIZE_S 20
-#define HNS_ROCE_EQC_EQE_SIZE_M GENMASK(21, 20)
+#define EQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_eq_context, h, l)
+
+#define EQC_EQ_ST EQC_FIELD_LOC(1, 0)
+#define EQC_EQE_HOP_NUM EQC_FIELD_LOC(3, 2)
+#define EQC_OVER_IGNORE EQC_FIELD_LOC(4, 4)
+#define EQC_COALESCE EQC_FIELD_LOC(5, 5)
+#define EQC_ARM_ST EQC_FIELD_LOC(7, 6)
+#define EQC_EQN EQC_FIELD_LOC(15, 8)
+#define EQC_EQE_CNT EQC_FIELD_LOC(31, 16)
+#define EQC_EQE_BA_PG_SZ EQC_FIELD_LOC(35, 32)
+#define EQC_EQE_BUF_PG_SZ EQC_FIELD_LOC(39, 36)
+#define EQC_EQ_PROD_INDX EQC_FIELD_LOC(63, 40)
+#define EQC_EQ_MAX_CNT EQC_FIELD_LOC(79, 64)
+#define EQC_EQ_PERIOD EQC_FIELD_LOC(95, 80)
+#define EQC_EQE_REPORT_TIMER EQC_FIELD_LOC(127, 96)
+#define EQC_EQE_BA_L EQC_FIELD_LOC(159, 128)
+#define EQC_EQE_BA_H EQC_FIELD_LOC(188, 160)
+#define EQC_SHIFT EQC_FIELD_LOC(199, 192)
+#define EQC_MSI_INDX EQC_FIELD_LOC(207, 200)
+#define EQC_CUR_EQE_BA_L EQC_FIELD_LOC(223, 208)
+#define EQC_CUR_EQE_BA_M EQC_FIELD_LOC(255, 224)
+#define EQC_CUR_EQE_BA_H EQC_FIELD_LOC(259, 256)
+#define EQC_EQ_CONS_INDX EQC_FIELD_LOC(287, 264)
+#define EQC_NEX_EQE_BA_L EQC_FIELD_LOC(319, 288)
+#define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320)
+#define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340)
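As a worked check of the encoding above: EQC_EQ_CONS_INDX is FIELD_LOC(287, 264), so (assuming the usual 32-bit word granularity) it sits in data word 264 / 32 = 8 at in-word shift 264 % 32 = 8 with width 287 - 264 + 1 = 24 bits, which reproduces the removed HNS_ROCE_EQC_CONS_INDX_S = 8 and GENMASK(31, 8) pair exactly. Illustrative only:

	/* Recover the legacy shift/mask from FIELD_LOC(287, 264). */
	#define CONS_INDX_WORD	(264 / 32)	/* word 8 of the context  */
	#define CONS_INDX_SHIFT	(264 % 32)	/* 8  == the old _S value */
	#define CONS_INDX_MASK	GENMASK(287 - 256, 264 - 256)	/* (31, 8) */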
#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
@@ -2082,14 +1876,14 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
-#define HNS_ROCE_V2_EQ_DB_CMD_S 16
-#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
+#define V2_EQ_DB_TAG_S 0
+#define V2_EQ_DB_TAG_M GENMASK(7, 0)
-#define HNS_ROCE_V2_EQ_DB_TAG_S 0
-#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
+#define V2_EQ_DB_CMD_S 16
+#define V2_EQ_DB_CMD_M GENMASK(17, 16)
-#define HNS_ROCE_V2_EQ_DB_PARA_S 0
-#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
+#define V2_EQ_DB_CONS_IDX_S 0
+#define V2_EQ_DB_CONS_IDX_M GENMASK(23, 0)
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c9c0836394a2..6c6e82b11d8b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -42,7 +42,7 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr)
{
u8 phy_port;
u32 i;
@@ -63,7 +63,7 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
- u8 port = attr->port_num - 1;
+ u32 port = attr->port_num - 1;
int ret;
if (port >= hr_dev->caps.num_ports)
@@ -77,7 +77,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
- u8 port = attr->port_num - 1;
+ u32 port = attr->port_num - 1;
int ret;
if (port >= hr_dev->caps.num_ports)
@@ -88,7 +88,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
return ret;
}
-static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
+static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
unsigned long event)
{
struct device *dev = hr_dev->dev;
@@ -128,7 +128,7 @@ static int hns_roce_netdev_event(struct notifier_block *self,
struct hns_roce_ib_iboe *iboe = NULL;
struct hns_roce_dev *hr_dev = NULL;
int ret;
- u8 port;
+ u32 port;
hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
iboe = &hr_dev->iboe;
@@ -207,10 +207,13 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ props->device_cap_flags |= IB_DEVICE_XRC;
+
return 0;
}
-static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
+static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
struct ib_port_attr *props)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
@@ -218,7 +221,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct net_device *net_dev;
unsigned long flags;
enum ib_mtu mtu;
- u8 port;
+ u32 port;
port = port_num - 1;
@@ -258,12 +261,12 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
}
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
-static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
u16 *pkey)
{
*pkey = PKEY_ID;
@@ -300,12 +303,14 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return -EAGAIN;
resp.qp_tab_size = hr_dev->caps.num_qps;
+ resp.srq_tab_size = hr_dev->caps.num_srqs;
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_fail_uar_alloc;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
}
@@ -365,7 +370,7 @@ static int hns_roce_mmap(struct ib_ucontext *context,
}
}
-static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
+static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -390,6 +395,19 @@ static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
+static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
+{
+ u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
+ unsigned int major, minor, sub_minor;
+
+ major = upper_32_bits(fw_ver);
+ minor = high_16_bits(lower_32_bits(fw_ver));
+ sub_minor = low_16_bits(fw_ver);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
+ sub_minor);
+}
+
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
@@ -405,6 +423,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.uverbs_abi_ver = 1,
.uverbs_no_driver_id_binding = 1,
+ .get_dev_fw_str = hns_roce_get_fw_ver,
.add_gid = hns_roce_add_gid,
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
@@ -461,6 +480,13 @@ static const struct ib_device_ops hns_roce_dev_srq_ops = {
INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};
+static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
+ .alloc_xrcd = hns_roce_alloc_xrcd,
+ .dealloc_xrcd = hns_roce_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
+};
+
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -484,20 +510,20 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
- /* MW */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
- /* FRMR */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
- /* SRQ */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
+
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
for (i = 0; i < hr_dev->caps.num_ports; i++) {
@@ -704,7 +730,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&hr_dev->pgdir_list);
mutex_init(&hr_dev->pgdir_mutex);
}
@@ -727,10 +754,19 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_alloc_free;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
+ ret = hns_roce_init_xrcd_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to init xrcd table, ret = %d.\n",
+ ret);
+ goto err_pd_table_free;
+ }
+ }
+
ret = hns_roce_init_mr_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init memory region table.\n");
- goto err_pd_table_free;
+ goto err_xrcd_table_free;
}
hns_roce_init_cq_table(hr_dev);
@@ -759,6 +795,10 @@ err_cq_table_free:
hns_roce_cleanup_cq_table(hr_dev);
hns_roce_cleanup_mr_table(hr_dev);
+err_xrcd_table_free:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ hns_roce_cleanup_xrcd_table(hr_dev);
+
err_pd_table_free:
hns_roce_cleanup_pd_table(hr_dev);
@@ -886,6 +926,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
INIT_LIST_HEAD(&hr_dev->qp_list);
spin_lock_init(&hr_dev->qp_list_lock);
+ INIT_LIST_HEAD(&hr_dev->dip_list);
+ spin_lock_init(&hr_dev->dip_list_lock);
ret = hns_roce_register_device(hr_dev);
if (ret)
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index cca818d05a8f..a5813bf567b2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -137,3 +137,62 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
}
+
+static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
+{
+ unsigned long obj;
+ int ret;
+
+ ret = hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, &obj);
+ if (ret)
+ return ret;
+
+ *xrcdn = obj;
+
+ return 0;
+}
+
+static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
+ u32 xrcdn)
+{
+ hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR);
+}
+
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+ return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
+ hr_dev->caps.num_xrcds,
+ hr_dev->caps.num_xrcds - 1,
+ hr_dev->caps.reserved_xrcds, 0);
+}
+
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
+}
+
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+ struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
+ int ret;
+
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+ return -EINVAL;
+
+ ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to alloc xrcdn, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+ hns_roce_xrcd_free(to_hr_dev(ib_xrcd->device),
+ to_hr_xrcd(ib_xrcd)->xrcdn);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 004aca9086ab..230a909ba9bc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -98,7 +98,9 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
(event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
qp->state = IB_QPS_ERR;
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
init_flush_work(hr_dev, qp);
@@ -142,6 +144,8 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
event.event = IB_EVENT_QP_REQ_ERR;
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
default:
@@ -366,8 +370,13 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
unsigned long flags;
list_del(&hr_qp->node);
- list_del(&hr_qp->sq_node);
- list_del(&hr_qp->rq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->sq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
+ hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->rq_node);
xa_lock_irqsave(xa, flags);
__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
@@ -478,7 +487,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
+ hr_qp->ibqp.qp_type != IB_QPT_UD &&
+ hr_qp->ibqp.qp_type != IB_QPT_GSI)
hr_qp->rq_inl_buf.wqe_cnt = cnt;
else
hr_qp->rq_inl_buf.wqe_cnt = 0;
@@ -776,7 +787,7 @@ static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp_resp *resp,
struct hns_roce_ib_create_qp *ucmd)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_sq(init_attr) &&
udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
@@ -787,7 +798,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_udata *udata,
struct hns_roce_ib_create_qp_resp *resp)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_rq(init_attr));
}
@@ -795,7 +806,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_qp_init_attr *init_attr)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
hns_roce_qp_has_rq(init_attr));
}
@@ -840,11 +851,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
} else {
- /* QP doorbell register address */
- hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
- hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ hr_qp->sq.db_reg = hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+ else
+ hr_qp->sq.db_reg =
+ hr_dev->reg_base + hr_dev->sdb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
if (kernel_qp_has_rdb(hr_dev, init_attr)) {
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
@@ -1011,36 +1027,36 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
}
- ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
- ret);
- goto err_wrid;
- }
-
ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
if (ret) {
ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
- goto err_db;
+ goto err_buf;
}
ret = alloc_qpn(hr_dev, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
- goto err_buf;
+ goto err_qpn;
+ }
+
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+ ret);
+ goto err_db;
}
ret = alloc_qpc(hr_dev, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
ret);
- goto err_qpn;
+ goto err_qpc;
}
ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
if (ret) {
ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
- goto err_qpc;
+ goto err_store;
}
if (udata) {
@@ -1055,7 +1071,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret)
- goto err_store;
+ goto err_flow_ctrl;
}
hr_qp->ibqp.qp_num = hr_qp->qpn;
@@ -1065,17 +1081,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
return 0;
-err_store:
+err_flow_ctrl:
hns_roce_qp_remove(hr_dev, hr_qp);
-err_qpc:
+err_store:
free_qpc(hr_dev, hr_qp);
-err_qpn:
+err_qpc:
+ free_qp_db(hr_dev, hr_qp, udata);
+err_db:
free_qpn(hr_dev, hr_qp);
-err_buf:
+err_qpn:
free_qp_buf(hr_dev, hr_qp);
-err_db:
- free_qp_db(hr_dev, hr_qp, udata);
-err_wrid:
+err_buf:
free_kernel_wrid(hr_qp);
return ret;
}
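The relabelled unwind path above follows the usual kernel convention: each goto label is named for the allocation step that failed, and teardown runs in exact reverse order of setup, so inserting a step (here, moving alloc_qp_db() after alloc_qpn()) only shifts labels instead of restructuring the function. A compact sketch of the pattern, with hypothetical helpers standing in for the QP alloc/free pairs:

	struct obj;
	int alloc_buf(struct obj *o);  void free_buf(struct obj *o);
	int alloc_id(struct obj *o);   void free_id(struct obj *o);
	int alloc_db(struct obj *o);   void free_db(struct obj *o);

	static int setup_obj(struct obj *o)
	{
		int ret;

		ret = alloc_buf(o);
		if (ret)
			return ret;

		ret = alloc_id(o);
		if (ret)
			goto err_id;	/* label named for the failed step */

		ret = alloc_db(o);
		if (ret)
			goto err_db;

		return 0;

	err_db:
		free_id(o);		/* unwind in exact reverse order */
	err_id:
		free_buf(o);
		return ret;
	}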
@@ -1100,11 +1116,16 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
bool is_user)
{
switch (type) {
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+ goto out;
+ break;
case IB_QPT_UD:
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
- fallthrough;
+ break;
case IB_QPT_RC:
case IB_QPT_GSI:
break;
@@ -1124,8 +1145,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp;
int ret;
@@ -1137,6 +1158,15 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
if (!hr_qp)
return ERR_PTR(-ENOMEM);
+ if (init_attr->qp_type == IB_QPT_XRC_INI)
+ init_attr->recv_cq = NULL;
+
+ if (init_attr->qp_type == IB_QPT_XRC_TGT) {
+ hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
+ init_attr->recv_cq = NULL;
+ init_attr->send_cq = NULL;
+ }
+
if (init_attr->qp_type == IB_QPT_GSI) {
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1156,20 +1186,18 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
int to_hr_qp_type(int qp_type)
{
- int transport_type;
-
- if (qp_type == IB_QPT_RC)
- transport_type = SERV_TYPE_RC;
- else if (qp_type == IB_QPT_UC)
- transport_type = SERV_TYPE_UC;
- else if (qp_type == IB_QPT_UD)
- transport_type = SERV_TYPE_UD;
- else if (qp_type == IB_QPT_GSI)
- transport_type = SERV_TYPE_UD;
- else
- transport_type = -1;
-
- return transport_type;
+ switch (qp_type) {
+ case IB_QPT_RC:
+ return SERV_TYPE_RC;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return SERV_TYPE_UD;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ return SERV_TYPE_XRC;
+ default:
+ return -1;
+ }
}
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index d5a6de0e7095..546d182c577a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -314,6 +314,9 @@ static void set_srq_ext_param(struct hns_roce_srq *srq,
{
srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
to_hr_cq(init_attr->ext.cq)->cqn : 0;
+
+ srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
+ to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
}
static int set_srq_param(struct hns_roce_srq *srq,
@@ -412,7 +415,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+ srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
srq->event = hns_roce_ib_srq_event;
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 6a79502c8b53..be4094ac4fac 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -504,15 +504,6 @@ static inline void i40iw_free_resource(struct i40iw_device *iwdev,
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
}
-/**
- * to_iwhdl - Get the handler from the device pointer
- * @iwdev: device pointer
- **/
-static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
-{
- return container_of(iw_dev, struct i40iw_handler, device);
-}
-
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index ac65c8237b2e..2450b7dd51f6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -905,7 +905,7 @@ static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
}
/**
- * recv_mpa - process an IETF MPA frame
+ * i40iw_parse_mpa - process an IETF MPA frame
* @cm_node: connection's node
* @buffer: Data pointer
* @type: to return accept or reject
@@ -4360,7 +4360,7 @@ void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
}
/**
- * i40iw_ifdown_notify - process an ifdown on an interface
+ * i40iw_if_notify - process an ifdown on an interface
* @iwdev: device pointer
* @netdev: network interface device structure
* @ipaddr: Pointer to IPv4 or IPv6 address
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
index 8bd72af9e099..b44bfc1d239b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
@@ -285,7 +285,7 @@ static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *d
}
/**
- * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects
+ * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
* @dev: pointer to the device structure
* @info: pointer to i40iw_hmc_iw_create_obj_info struct
*
@@ -434,7 +434,7 @@ static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
}
/**
- * i40iw_del_iw_hmc_obj - remove pe hmc objects
+ * i40iw_sc_del_hmc_obj - remove pe hmc objects
* @dev: pointer to the device structure
* @info: pointer to i40iw_hmc_del_obj_info struct
* @reset: true if called before reset
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index ab4cb11950dc..b496f30ce066 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -78,7 +78,7 @@ static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
static LIST_HEAD(i40iw_handlers);
-static spinlock_t i40iw_handler_lock;
+static DEFINE_SPINLOCK(i40iw_handler_lock);
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
u32 vf_id, u8 *msg, u16 len);
@@ -251,7 +251,7 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
}
/**
- * i40iw_disable_irqs - disable device interrupts
+ * i40iw_disable_irq - disable device interrupts
* @dev: hardware control device structure
* @msix_vec: msix vector to disable irq
* @dev_id: parameter to pass to free_irq (used during irq setup)
@@ -2043,7 +2043,6 @@ static int __init i40iw_init_module(void)
i40iw_client.ops = &i40e_ops;
memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
i40iw_client.type = I40E_CLIENT_IWARP;
- spin_lock_init(&i40iw_handler_lock);
ret = i40e_register_client(&i40iw_client);
i40iw_register_notifiers();
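Replacing the file-scope spinlock_t plus runtime spin_lock_init() with DEFINE_SPINLOCK() initializes the lock at compile time, so it is valid even before i40iw_init_module() runs and one init step disappears. A minimal sketch of the pattern:

	#include <linux/spinlock.h>

	/* Compile-time initialized: no spin_lock_init() call needed, and
	 * no window where the lock exists but is not yet initialized. */
	static DEFINE_SPINLOCK(handler_lock);

	static void touch_handlers(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&handler_lock, flags);
		/* ... walk or mutate the shared handler list ... */
		spin_unlock_irqrestore(&handler_lock, flags);
	}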
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index d474aad62a81..d938ccb195b1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -51,17 +51,6 @@ static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
}
/**
- * set_32bit_val - set 32 value to hw wqe
- * @wqe_words: wqe addr to write
- * @byte_index: index in wqe
- * @value: value to write
- **/
-static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)
-{
- wqe_words[byte_index >> 2] = value;
-}
-
-/**
* get_64bit_val - read 64 bit value from wqe
* @wqe_words: wqe addr
* @byte_index: index to read from
@@ -72,17 +61,6 @@ static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
*value = wqe_words[byte_index >> 3];
}
-/**
- * get_32bit_val - read 32 bit value from wqe
- * @wqe_words: wqe addr
- * @byte_index: index to reaad from
- * @value: return 32 bit value
- **/
-static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)
-{
- *value = wqe_words[byte_index >> 2];
-}
-
struct i40iw_dma_mem {
void *va;
dma_addr_t pa;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 53e5cd1a2bd6..146a4148219b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (chunk->size >> 3);
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
- if (sd_entry->valid)
- return 0;
- if (dev->is_pf) {
+ if (dev->is_pf && !sd_entry->valid) {
ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
sd_reg_val, idx->sd_idx,
sd_entry->entry_type, true);
@@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
}
sd_entry->valid = true;
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
return 0;
error:
kfree(chunk);
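The reordering above is the standard "publish last" rule: the chunk is linked into pinfo.clist only after every step that can still fail has succeeded, so the error path can simply kfree() it without first unlinking it. A sketch of the rule with hypothetical names:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct chunk { struct list_head list; };
	struct pool  { struct list_head clist; };

	int register_chunk_with_hw(struct chunk *chunk);  /* hypothetical, may fail */

	static int add_chunk(struct pool *pool)
	{
		struct chunk *chunk;
		int ret;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk)
			return -ENOMEM;

		ret = register_chunk_with_hw(chunk);
		if (ret)
			goto error;	/* not published: plain kfree() suffices */

		list_add(&chunk->list, &pool->clist);	/* publish only after success */
		return 0;

	error:
		kfree(chunk);
		return ret;
	}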
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index d1c8cc0a6236..88fb68e866ba 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1000,7 +1000,7 @@ static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
}
/**
- * i40iw_ieq_get_fpdu - given length return fpdu length
+ * i40iw_ieq_get_fpdu_length - given length return fpdu length
* @length: length of fpdu
*/
static u16 i40iw_ieq_get_fpdu_length(u16 length)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 76f052b12c14..9ff825f7860b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -890,7 +890,7 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
}
/**
- * i40iw_terminate_imeout - timeout happened
+ * i40iw_terminate_timeout - timeout happened
* @t: points to iwarp qp
*/
static void i40iw_terminate_timeout(struct timer_list *t)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index f18d146a6079..b876d722fcc8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -94,7 +94,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
* @props: returning device attributes
*/
static int i40iw_query_port(struct ib_device *ibdev,
- u8 port,
+ u32 port,
struct ib_port_attr *props)
{
props->lid = 1;
@@ -647,7 +647,7 @@ error:
}
/**
- * i40iw_query - query qp attributes
+ * i40iw_query_qp - query qp attributes
* @ibqp: qp pointer
* @attr: attributes pointer
* @attr_mask: Not used
@@ -1846,7 +1846,7 @@ static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
}
/**
- * i40iw_del_mem_list - Deleting pbl list entries for CQ/QP
+ * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
* @iwmr: iwmr for IB's user page addresses
* @ucontext: ptr to user context
*/
@@ -2347,7 +2347,7 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
* @port_num: port number
* @immutable: immutable data for the port return
*/
-static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int i40iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -2446,7 +2446,7 @@ static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
* @port_num: port number
*/
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -2477,7 +2477,7 @@ static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
*/
static int i40iw_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -2504,7 +2504,7 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
* @gid: Global ID
*/
static int i40iw_query_gid(struct ib_device *ibdev,
- u8 port,
+ u32 port,
int index,
union ib_gid *gid)
{
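
The u8-to-u32 widening of port arguments running through this file (and the mlx4/mlx5 files below) tracks the RDMA core change that widened port numbers to u32 so a device can expose more than 255 ports: the struct ib_device_ops callbacks take the port as u32, and a driver callback keeping the old u8 signature would no longer match the function-pointer type. A simplified, self-contained illustration of why the prototypes must change in lock-step (the types here are hypothetical stand-ins, not the real RDMA core):

#include <stdint.h>

struct port_attr { int lid; };

struct device_ops {
	int (*query_port)(void *ibdev, uint32_t port, struct port_attr *attr);
};

static int drv_query_port(void *ibdev, uint32_t port, struct port_attr *attr)
{
	attr->lid = 1;		/* mirrors i40iw_query_port() above */
	return 0;
}

static const struct device_ops ops = {
	/* a u8 port parameter here would be an incompatible pointer type */
	.query_port = drv_query_port,
};
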
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index aca9061688ae..e34a1522132c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -333,7 +333,7 @@ static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback
}
/**
- * pf_add_hmc_obj - Callback for Add HMC Object
+ * pf_add_hmc_obj_callback - Callback for Add HMC Object
* @work_vf_dev: pointer to the VF Device
*/
static void pf_add_hmc_obj_callback(void *work_vf_dev)
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index cca414ecfcd5..571d9c542024 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -73,12 +73,12 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
int *resched_delay_sec);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
- u8 port_num, u8 *p_data)
+ u32 port_num, u8 *p_data)
{
int i;
u64 guid_indexes;
int slave_id;
- int port_index = port_num - 1;
+ u32 port_index = port_num - 1;
if (!mlx4_is_master(dev->dev))
return;
@@ -86,7 +86,7 @@ void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
- pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+ pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes);
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
/* The location of the specific index starts from bit number 4
@@ -184,7 +184,7 @@ unlock:
* port_number - 1 or 2
*/
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
- int block_num, u8 port_num,
+ int block_num, u32 port_num,
u8 *p_data)
{
int i;
@@ -206,7 +206,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
- pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+ pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes);
/*calculate the slaves and notify them*/
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -260,11 +260,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
&gen_event);
- pr_debug("slave: %d, port: %d prev_port_state: %d,"
+ pr_debug("slave: %d, port: %u prev_port_state: %d,"
" new_port_state: %d, gen_event: %d\n",
slave_id, port_num, prev_state, new_state, gen_event);
if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
- pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
+ pr_debug("sending PORT_UP event to slave: %d, port: %u\n",
slave_id, port_num);
mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
@@ -274,7 +274,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
&gen_event);
if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
- pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+ pr_debug("sending PORT DOWN event to slave: %d, port: %u\n",
slave_id, port_num);
mlx4_gen_port_state_change_eqe(dev->dev,
slave_id,
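
Note the matching pr_debug() conversions from %d to %u in the hunks above: a u8 argument promoted to int under the default argument promotions, so %d happened to be fine, but a u32 stays unsigned and %u is the matching conversion specifier. A plain C illustration:

#include <stdio.h>

int main(void)
{
	unsigned int port_num = 2;

	printf("port: %u\n", port_num);	/* %u matches unsigned int */
	return 0;
}
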
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f3ace85552f3..d13ecbdd4391 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -88,8 +88,8 @@ struct mlx4_rcv_tunnel_mad {
struct ib_mad mad;
} __packed;
-static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
-static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num);
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
int block, u32 change_bitmap);
@@ -186,7 +186,7 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
return err;
}
-static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
+static void update_sm_ah(struct mlx4_ib_dev *dev, u32 port_num, u16 lid, u8 sl)
{
struct ib_ah *new_ah;
struct rdma_ah_attr ah_attr;
@@ -217,8 +217,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
* Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
* synthesize LID change, Client-Rereg, GID change, and P_Key change events.
*/
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
- u16 prev_lid)
+static void smp_snoop(struct ib_device *ibdev, u32 port_num,
+ const struct ib_mad *mad, u16 prev_lid)
{
struct ib_port_info *pinfo;
u16 lid;
@@ -274,7 +274,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
be16_to_cpu(base[i]);
}
}
- pr_debug("PKEY Change event: port=%d, "
+ pr_debug("PKEY Change event: port=%u, "
"block=0x%x, change_bitmap=0x%x\n",
port_num, bn, pkey_change_bitmap);
@@ -380,7 +380,8 @@ static void node_desc_override(struct ib_device *dev,
}
}
-static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
+static void forward_trap(struct mlx4_ib_dev *dev, u32 port_num,
+ const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
struct ib_mad_send_buf *send_buf;
@@ -429,7 +430,7 @@ static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave
return ret;
}
-int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int i;
@@ -443,7 +444,7 @@ int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
- u8 port, u16 pkey, u16 *ix)
+ u32 port, u16 pkey, u16 *ix)
{
int i, ret;
u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
@@ -507,7 +508,7 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
return (qpn >= proxy_start && qpn <= proxy_start + 1);
}
-int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad)
{
@@ -678,7 +679,7 @@ end:
return ret;
}
-static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
+static int mlx4_ib_demux_mad(struct ib_device *ibdev, u32 port,
struct ib_wc *wc, struct ib_grh *grh,
struct ib_mad *mad)
{
@@ -818,7 +819,7 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
return 0;
}
-static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -932,9 +933,10 @@ static int iboe_process_mad_port_info(void *out_mad)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -979,7 +981,7 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return err;
}
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
@@ -1073,7 +1075,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
}
}
-static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num)
{
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
@@ -1082,7 +1084,7 @@ static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}
-static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num)
{
/* re-configure the alias-guid and mcg's */
if (mlx4_is_master(dev->dev)) {
@@ -1121,7 +1123,7 @@ static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
GET_MASK_FROM_EQE(eqe));
}
-static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
+static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u32 port_num,
u32 guid_tbl_blk_num, u32 change_bitmap)
{
struct ib_smp *in_mad = NULL;
@@ -1177,7 +1179,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
struct mlx4_ib_dev *dev = ew->ib_dev;
struct mlx4_eqe *eqe = &(ew->ib_eqe);
- u8 port = eqe->event.port_mgmt_change.port;
+ u32 port = eqe->event.port_mgmt_change.port;
u32 changed_attr;
u32 tbl_block;
u32 change_bitmap;
@@ -1274,7 +1276,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
kfree(ew);
}
-void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
enum ib_event_type type)
{
struct ib_event event;
@@ -1351,7 +1353,7 @@ static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
return ret;
}
-int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, u16 pkey_index,
u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f26a0d920842..22898d97ecbd 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -81,7 +81,7 @@ static const char mlx4_ib_version[] =
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
static struct workqueue_struct *wq;
@@ -129,7 +129,8 @@ static int num_ib_ports(struct mlx4_dev *dev)
return ib_ports;
}
-static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
+static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
+ u32 port_num)
{
struct mlx4_ib_dev *ibdev = to_mdev(device);
struct net_device *dev;
@@ -160,7 +161,7 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_n
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
struct mlx4_ib_dev *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -193,7 +194,7 @@ static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
struct mlx4_ib_dev *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -238,7 +239,7 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
static int mlx4_ib_update_gids(struct gid_entry *gids,
struct mlx4_ib_dev *ibdev,
- u8 port_num)
+ u32 port_num)
{
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
@@ -407,7 +408,7 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
int real_index = -EINVAL;
int i;
unsigned long flags;
- u8 port_num = attr->port_num;
+ u32 port_num = attr->port_num;
if (port_num > MLX4_MAX_PORTS)
return -EINVAL;
@@ -649,7 +650,7 @@ out:
}
static enum rdma_link_layer
-mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
+mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
@@ -657,7 +658,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
-static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+static int ib_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
struct ib_smp *in_mad = NULL;
@@ -753,7 +754,7 @@ static u8 state_to_phys_state(enum ib_port_state state)
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}
-static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+static int eth_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
@@ -814,7 +815,7 @@ out:
return err;
}
-int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
int err;
@@ -828,14 +829,14 @@ int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
return err;
}
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
/* returns host view */
return __mlx4_ib_query_port(ibdev, port, props, 0);
}
-int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view)
{
struct ib_smp *in_mad = NULL;
@@ -891,7 +892,7 @@ out:
return err;
}
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
if (rdma_protocol_ib(ibdev, port))
@@ -899,7 +900,8 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
+static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
+ u64 *sl2vl_tbl)
{
union sl2vl_tbl_to_u64 sl2vl64;
struct ib_smp *in_mad = NULL;
@@ -959,7 +961,7 @@ static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
}
}
-int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view)
{
struct ib_smp *in_mad = NULL;
@@ -992,7 +994,8 @@ out:
return err;
}
-static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
{
return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
@@ -1033,8 +1036,8 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return 0;
}
-static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
- u32 cap_mask)
+static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
+ int reset_qkey_viols, u32 cap_mask)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
@@ -1059,7 +1062,7 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
return err;
}
-static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx4_ib_dev *mdev = to_mdev(ibdev);
@@ -2103,7 +2106,7 @@ static const struct diag_counter diag_device_only[] = {
};
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
@@ -2118,7 +2121,7 @@ static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
@@ -2466,7 +2469,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
ibdev->eq_table = NULL;
}
-static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 78c9bb79ec75..e856cf23a0a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -429,7 +429,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
struct workqueue_struct *wq;
struct delayed_work alias_guid_work;
- u8 port;
+ u32 port;
u32 state_flags;
struct mlx4_sriov_alias_guid *parent;
struct list_head cb_list;
@@ -657,7 +657,7 @@ struct mlx4_ib_qp_tunnel_init_attr {
struct ib_qp_init_attr init_attr;
int slave;
enum ib_qp_type proxy_qp_type;
- u8 port;
+ u32 port;
};
struct mlx4_uverbs_ex_query_device {
@@ -810,24 +810,24 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
-int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view);
-int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view);
-int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view);
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
- u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
+ u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
return true;
@@ -841,7 +841,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);
-int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid);
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
struct ib_sa_mad *sa_mad);
@@ -851,16 +851,16 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid);
-void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num,
enum ib_event_type type);
void mlx4_ib_tunnels_update_work(struct work_struct *work);
-int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad);
-int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port,
enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
u16 vlan_id, struct ib_mad *mad);
@@ -884,10 +884,10 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
int block_num,
- u8 port_num, u8 *p_data);
+ u32 port_num, u8 *p_data);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
- int block_num, u8 port_num,
+ int block_num, u32 port_num,
u8 *p_data);
int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 651785bd57f2..92ddbcc00eb2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3135,7 +3135,6 @@ static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
}
if (is_eth) {
- struct in6_addr in6;
u16 ether_type;
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -3148,8 +3147,6 @@ static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
- memcpy(&in6, sgid.raw, sizeof(in6));
-
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index b4c009bb0db6..f43380106bd0 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -6,6 +6,7 @@ mlx5_ib-y := ah.o \
cong.o \
counters.o \
cq.o \
+ dm.o \
doorbell.o \
gsi.o \
ib_virt.o \
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 234f29912ba9..a8db8a051170 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -47,107 +47,6 @@ int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
- u64 length, u32 alignment)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
- >> PAGE_SHIFT;
- u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
- u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
- u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
- u32 mlx5_alignment;
- u64 page_idx = 0;
- int ret = 0;
-
- if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
- return -EINVAL;
-
- /* mlx5 device sets alignment as 64*2^driver_value
- * so normalizing is needed.
- */
- mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
- alignment - MLX5_MEMIC_BASE_ALIGN;
- if (mlx5_alignment > max_alignment)
- return -EINVAL;
-
- MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
- MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
- MLX5_SET(alloc_memic_in, in, memic_size, length);
- MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
- mlx5_alignment);
-
- while (page_idx < num_memic_hw_pages) {
- spin_lock(&dm->lock);
- page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
- num_memic_hw_pages,
- page_idx,
- num_pages, 0);
-
- if (page_idx < num_memic_hw_pages)
- bitmap_set(dm->memic_alloc_pages,
- page_idx, num_pages);
-
- spin_unlock(&dm->lock);
-
- if (page_idx >= num_memic_hw_pages)
- break;
-
- MLX5_SET64(alloc_memic_in, in, range_start_addr,
- hw_start_addr + (page_idx * PAGE_SIZE));
-
- ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
- if (ret) {
- spin_lock(&dm->lock);
- bitmap_clear(dm->memic_alloc_pages,
- page_idx, num_pages);
- spin_unlock(&dm->lock);
-
- if (ret == -EAGAIN) {
- page_idx++;
- continue;
- }
-
- return ret;
- }
-
- *addr = dev->bar_addr +
- MLX5_GET64(alloc_memic_out, out, memic_start_addr);
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
-{
- struct mlx5_core_dev *dev = dm->dev;
- u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
- u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
- u64 start_page_idx;
- int err;
-
- addr -= dev->bar_addr;
- start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
-
- MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
- MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
- MLX5_SET(dealloc_memic_in, in, memic_size, length);
-
- err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
- if (err)
- return;
-
- spin_lock(&dm->lock);
- bitmap_clear(dm->memic_alloc_pages,
- start_page_idx, num_pages);
- spin_unlock(&dm->lock);
-}
-
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
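
The mlx5_cmd_alloc_memic()/mlx5_cmd_dealloc_memic() bodies removed here reappear verbatim in the new drivers/infiniband/hw/mlx5/dm.c added below. One detail worth a worked example is the alignment normalization: the device interprets the field as 64*2^value, so the driver subtracts the base alignment from the requested log2 alignment. A small standalone calculation, assuming MLX5_MEMIC_BASE_ALIGN is 6 (log2 of 64):

#include <stdio.h>

#define MEMIC_BASE_ALIGN 6	/* assumed: log2(64) */

int main(void)
{
	unsigned int alignment = 8;	/* request log2 = 8, i.e. 256 bytes */
	unsigned int encoded = alignment < MEMIC_BASE_ALIGN ?
			       0 : alignment - MEMIC_BASE_ALIGN;

	/* the device computes 64 * 2^encoded = 64 << 2 = 256 bytes */
	printf("encoded=%u effective=%u\n", encoded, 64u << encoded);
	return 0;
}
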
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 88ea6ef8f2cb..66c96292ed43 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -41,9 +41,6 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out);
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
- u64 length, u32 alignment);
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index b9291e482428..0b61df52332a 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -267,7 +267,7 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
}
}
-static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
@@ -304,7 +304,7 @@ alloc_err:
return err;
}
-static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
@@ -397,7 +397,7 @@ static const struct file_operations dbg_cc_fops = {
.read = get_param,
};
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
{
if (!mlx5_debugfs_root ||
!dev->port[port_num].dbg_cc_params ||
@@ -409,7 +409,7 @@ void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
dev->port[port_num].dbg_cc_params = NULL;
}
-void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
struct mlx5_core_dev *mdev;
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 084652e2b15a..e365341057cb 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -139,7 +139,7 @@ static int mlx5_ib_create_counters(struct ib_counters *counters,
static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
- u8 port_num)
+ u32 port_num)
{
return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
&dev->port[port_num].cnts;
@@ -154,7 +154,7 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
* device port combination in switchdev and non switchdev mode of the
* parent device.
*/
-u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
{
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
@@ -162,7 +162,7 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
}
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts;
@@ -236,13 +236,13 @@ free:
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port_num, int index)
+ u32 port_num, int index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
struct mlx5_core_dev *mdev;
int ret, num_counters;
- u8 mdev_port_num;
+ u32 mdev_port_num;
if (!stats)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
index 1aa30c2f3f4d..6bcaaa52e2b2 100644
--- a/drivers/infiniband/hw/mlx5/counters.h
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -13,5 +13,5 @@ void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_clear_description(struct ib_counters *counters);
int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
struct mlx5_ib_create_flow *ucmd);
-u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num);
#endif /* _MLX5_IB_COUNTERS_H */
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 07b8350929cd..a0b677accd96 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2185,27 +2185,69 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
return 0;
}
+static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
+ unsigned long pgsz_bitmap)
+{
+ unsigned long page_size;
+
+ /* Don't bother checking larger page sizes, as the offset must be zero
+ * and the DEVX umem length must equal the full umem length.
+ */
+ pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
+ PAGE_SHIFT),
+ MLX5_ADAPTER_PAGE_SHIFT);
+ if (!pgsz_bitmap)
+ return 0;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
+ if (!page_size)
+ return 0;
+
+ /* If the page_size is less than the CPU page size then we can use the
+ * offset and create a umem which is a subset of the page list.
+ * For larger page sizes we can't be sure the DMA list reflects the
+ * VA so we must ensure that the umem extent is exactly equal to the
+ * page list. Reduce the page size until one of these cases is true.
+ */
+ while ((ib_umem_dma_offset(umem, page_size) != 0 ||
+ (umem->length % page_size) != 0) &&
+ page_size > PAGE_SIZE)
+ page_size /= 2;
+
+ return page_size;
+}
+
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
struct uverbs_attr_bundle *attrs,
struct devx_umem *obj,
struct devx_umem_reg_cmd *cmd)
{
+ unsigned long pgsz_bitmap;
unsigned int page_size;
__be64 *mtt;
void *umem;
+ int ret;
/*
- * We don't know what the user intends to use this umem for, but the HW
- * restrictions must be met. MR, doorbell records, QP, WQ and CQ all
- * have different requirements. Since we have no idea how to sort this
- * out, only support PAGE_SIZE with the expectation that userspace will
- * provide the necessary alignments inside the known PAGE_SIZE and that
- * FW will check everything.
+ * If the user does not pass in pgsz_bitmap then the user promises not
+ * to use umem_offset!=0 in any commands that allocate on top of the
+ * umem.
+ *
+ * If the user wants to use a umem_offset then it must pass in
+ * pgsz_bitmap which guides the maximum page size and thus maximum
+ * object alignment inside the umem. See the PRM.
+ *
+ * Users are not allowed to use IOVA here, mkeys are not supported on
+ * umem.
*/
- page_size = ib_umem_find_best_pgoff(
- obj->umem, PAGE_SIZE,
- __mlx5_page_offset_to_bitmask(__mlx5_bit_sz(umem, page_offset),
- 0));
+ ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ GENMASK_ULL(63,
+ min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
+ if (ret)
+ return ret;
+
+ page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
if (!page_size)
return -EINVAL;
@@ -2791,6 +2833,8 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
enum ib_access_flags),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ u64),
UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY));
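
devx_umem_find_best_pgsize() above first clamps the user-supplied bitmap between the adapter page shift and the umem length, then halves the candidate page size until either the DMA offset is zero and the umem length is an exact multiple, or the CPU page size is reached. A userspace sketch of that reduction loop (dma_offset() and the constant are hypothetical stand-ins for the ib_umem helpers):

#include <stdint.h>

#define CPU_PAGE_SIZE 4096UL

/* offset of the mapping start within a page of the candidate size */
static unsigned long dma_offset(unsigned long start, unsigned long pgsz)
{
	return start & (pgsz - 1);
}

static unsigned long shrink_pgsize(unsigned long start, unsigned long len,
				   unsigned long page_size)
{
	while ((dma_offset(start, page_size) != 0 || len % page_size != 0) &&
	       page_size > CPU_PAGE_SIZE)
		page_size /= 2;		/* halve until the extent fits exactly */
	return page_size;
}
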
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
new file mode 100644
index 000000000000..094bf85589db
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "dm.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+ u64 length, u32 alignment)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
+ >> PAGE_SHIFT;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
+ u32 mlx5_alignment;
+ u64 page_idx = 0;
+ int ret = 0;
+
+ if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
+ return -EINVAL;
+
+ /* mlx5 device sets alignment as 64*2^driver_value
+ * so normalizing is needed.
+ */
+ mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
+ alignment - MLX5_MEMIC_BASE_ALIGN;
+ if (mlx5_alignment > max_alignment)
+ return -EINVAL;
+
+ MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
+ MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
+ MLX5_SET(alloc_memic_in, in, memic_size, length);
+ MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
+ mlx5_alignment);
+
+ while (page_idx < num_memic_hw_pages) {
+ spin_lock(&dm->lock);
+ page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
+ num_memic_hw_pages,
+ page_idx,
+ num_pages, 0);
+
+ if (page_idx < num_memic_hw_pages)
+ bitmap_set(dm->memic_alloc_pages,
+ page_idx, num_pages);
+
+ spin_unlock(&dm->lock);
+
+ if (page_idx >= num_memic_hw_pages)
+ break;
+
+ MLX5_SET64(alloc_memic_in, in, range_start_addr,
+ hw_start_addr + (page_idx * PAGE_SIZE));
+
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
+ if (ret) {
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ page_idx, num_pages);
+ spin_unlock(&dm->lock);
+
+ if (ret == -EAGAIN) {
+ page_idx++;
+ continue;
+ }
+
+ return ret;
+ }
+
+ *addr = dev->bar_addr +
+ MLX5_GET64(alloc_memic_out, out, memic_start_addr);
+
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
+ u64 start_page_idx;
+ int err;
+
+ addr -= dev->bar_addr;
+ start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
+
+ MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
+ MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
+ MLX5_SET(dealloc_memic_in, in, memic_size, length);
+
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
+ if (err)
+ return;
+
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ start_page_idx, num_pages);
+ spin_unlock(&dm->lock);
+}
+
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ mlx5_cmd_exec_in(dev, modify_memic, in);
+}
+
+static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation, phys_addr_t *op_addr)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+ int err;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
+ if (err)
+ return err;
+
+ *op_addr = dev->bar_addr +
+ MLX5_GET64(modify_memic_out, out, memic_operation_addr);
+ return 0;
+}
+
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+ struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
+ size_t size, u64 address)
+{
+ mentry->mmap_flag = mmap_flag;
+ mentry->address = address;
+
+ return rdma_user_mmap_entry_insert_range(
+ context, &mentry->rdma_entry, size,
+ MLX5_IB_MMAP_DEVICE_MEM << 16,
+ (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static void mlx5_ib_dm_memic_free(struct kref *kref)
+{
+ struct mlx5_ib_dm_memic *dm =
+ container_of(kref, struct mlx5_ib_dm_memic, ref);
+ struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
+
+ mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+}
+
+static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
+ struct uverbs_attr_bundle *attrs)
+{
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ start_offset = op_entry->op_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+}
+
+static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+
+ op_entry = xa_load(&dm->ops, op);
+ if (!op_entry)
+ return -ENOENT;
+
+ return copy_op_to_user(op_entry, attrs);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct ib_dm *ibdm = uobj->object;
+ struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
+ struct mlx5_ib_dm_op_entry *op_entry;
+ int err;
+ u8 op;
+
+ err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
+ if (err)
+ return err;
+
+ if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dm->ops_xa_lock);
+ err = map_existing_op(dm, op, attrs);
+ if (!err || err != -ENOENT)
+ goto err_unlock;
+
+ op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
+ if (!op_entry)
+ goto err_unlock;
+
+ err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
+ &op_entry->op_addr);
+ if (err) {
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ op_entry->op = op;
+ op_entry->dm = dm;
+
+ err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
+ op_entry->op_addr & PAGE_MASK);
+ if (err) {
+ mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ /* From this point, entry will be freed by mmap_free */
+ kref_get(&dm->ref);
+
+ err = copy_op_to_user(op_entry, attrs);
+ if (err)
+ goto err_remove;
+
+ err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
+ if (err)
+ goto err_remove;
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return 0;
+
+err_remove:
+ rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
+err_unlock:
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return err;
+}
+
+static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ struct mlx5_ib_dm_memic *dm;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+ u64 address;
+
+ if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
+ dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
+ dm->base.ibdm.device = ctx->device;
+
+ kref_init(&dm->ref);
+ xa_init(&dm->ops);
+ mutex_init(&dm->ops_xa_lock);
+ dm->req_length = attr->length;
+
+ err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
+ dm->base.size, attr->alignment);
+ if (err) {
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ address = dm->base.dev_addr & PAGE_MASK;
+ err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
+ dm->base.size, address);
+ if (err) {
+ mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ goto err_copy;
+
+ start_offset = dm->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ goto err_copy;
+
+ return &dm->base.ibdm;
+
+err_copy:
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+ return ERR_PTR(err);
+}
+
+static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
+{
+ return uapi_type == MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM ?
+ MLX5_SW_ICM_TYPE_STEERING :
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+}
+
+static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs,
+ int type)
+{
+ struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
+ enum mlx5_sw_icm_type icm_type = get_icm_type(type);
+ struct mlx5_ib_dm_icm *dm;
+ u64 act_size;
+ int err;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = type;
+ dm->base.ibdm.device = ctx->device;
+
+ if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) {
+ err = -EPERM;
+ goto free;
+ }
+
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ /* Allocation size must be a multiple of the basic block size
+ * and a power of 2.
+ */
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ act_size = roundup_pow_of_two(act_size);
+
+ dm->base.size = act_size;
+ err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
+ to_mucontext(ctx)->devx_uid,
+ &dm->base.dev_addr, &dm->obj_id);
+ if (err)
+ goto free;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &dm->base.dev_addr, sizeof(dm->base.dev_addr));
+ if (err) {
+ mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
+ to_mucontext(ctx)->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ goto free;
+ }
+ return &dm->base.ibdm;
+free:
+ kfree(dm);
+ return ERR_PTR(err);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ enum mlx5_ib_uapi_dm_type type;
+ int err;
+
+ err = uverbs_get_const_default(&type, attrs,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ MLX5_IB_UAPI_DM_TYPE_MEMIC);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+ type, attr->length, attr->alignment);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ return handle_alloc_dm_memic(context, attr, attrs);
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ return handle_alloc_dm_sw_icm(context, attr, attrs, type);
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return handle_alloc_dm_sw_icm(context, attr, attrs, type);
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
+{
+ struct mlx5_ib_dm_op_entry *entry;
+ unsigned long idx;
+
+ mutex_lock(&dm->ops_xa_lock);
+ xa_for_each(&dm->ops, idx, entry) {
+ xa_erase(&dm->ops, idx);
+ rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
+ }
+ mutex_unlock(&dm->ops_xa_lock);
+}
+
+static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
+{
+ dm_memic_remove_ops(dm);
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+}
+
+static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
+ struct mlx5_ib_dm_icm *dm)
+{
+ enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
+ struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
+ int err;
+
+ err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ if (!err)
+ kfree(dm);
+ return 0;
+}
+
+static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+
+ switch (dm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ mlx5_dm_memic_dealloc(to_memic(ibdm));
+ return 0;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm *ibdm =
+ uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+ struct mlx5_ib_dm_memic *memic;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
+ return -EOPNOTSUPP;
+
+ memic = to_memic(ibdm);
+ page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ start_offset = memic->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ &memic->req_length,
+ sizeof(memic->req_length));
+}
+
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+ struct mlx5_ib_dm_memic *mdm;
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
+ mentry);
+ mdm = op_entry->dm;
+ mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
+ op_entry->op);
+ kfree(op_entry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ, UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_MAP_OP_ADDR,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
+
+const struct uapi_definition mlx5_ib_dm_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
+ {},
+};
+
+const struct ib_device_ops mlx5_ib_dev_dm_ops = {
+ .alloc_dm = mlx5_ib_alloc_dm,
+ .dealloc_dm = mlx5_ib_dealloc_dm,
+ .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
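
Lifetime handling in the new file: the base MEMIC allocation holds the initial kref on mlx5_ib_dm_memic, each mapped operation takes another via kref_get(), and mlx5_ib_dm_mmap_free() drops one per mapping, so the device memory is only returned to the allocator when the last mapping goes away. A standalone sketch of that pattern (a plain counter stands in for struct kref):

#include <stdlib.h>

struct memic {
	int refs;	/* stands in for struct kref */
};

static struct memic *memic_alloc(void)
{
	struct memic *m = calloc(1, sizeof(*m));

	if (m)
		m->refs = 1;	/* base mmap entry holds the initial ref */
	return m;
}

static void memic_get(struct memic *m)
{
	m->refs++;		/* one ref per mapped MEMIC operation */
}

static void memic_put(struct memic *m)
{
	if (--m->refs == 0)
		free(m);	/* last mapping gone: release device memory */
}
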
diff --git a/drivers/infiniband/hw/mlx5/dm.h b/drivers/infiniband/hw/mlx5/dm.h
new file mode 100644
index 000000000000..9674a80d8d70
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DM_H
+#define _MLX5_IB_DM_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dm_ops;
+extern const struct uapi_definition mlx5_ib_dm_defs[];
+
+struct mlx5_ib_dm {
+ struct ib_dm ibdm;
+ u32 type;
+ phys_addr_t dev_addr;
+ size_t size;
+};
+
+struct mlx5_ib_dm_op_entry {
+ struct mlx5_user_mmap_entry mentry;
+ phys_addr_t op_addr;
+ struct mlx5_ib_dm_memic *dm;
+ u8 op;
+};
+
+struct mlx5_ib_dm_memic {
+ struct mlx5_ib_dm base;
+ struct mlx5_user_mmap_entry mentry;
+ struct xarray ops;
+ struct mutex ops_xa_lock;
+ struct kref ref;
+ size_t req_length;
+};
+
+struct mlx5_ib_dm_icm {
+ struct mlx5_ib_dm base;
+ u32 obj_id;
+};
+
+static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm, ibdm);
+}
+
+static inline struct mlx5_ib_dm_memic *to_memic(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_memic, base.ibdm);
+}
+
+static inline struct mlx5_ib_dm_icm *to_icm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_icm, base.ibdm);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length);
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation);
+
+#endif /* _MLX5_IB_DM_H */
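
to_memic() and to_icm() above recover the outer structure from the embedded base's ib_dm via container_of(), which works at any embedding depth. A standalone illustration of the same downcast (sketch types, not the mlx5 ones):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_dm_s { int type; };

struct dm_base_s { struct ib_dm_s ibdm; };

struct dm_memic_s {
	struct dm_base_s base;	/* embedded base, need not be first */
	int req_length;
};

static struct dm_memic_s *to_memic_s(struct ib_dm_s *ibdm)
{
	/* nested member designators are valid with offsetof */
	return container_of(ibdm, struct dm_memic_s, base.ibdm);
}
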
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 01370d9a871a..2fc6a60c4e77 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
dst_num++;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
- flow_context, flow_act,
+ handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
+ fs_matcher, flow_context, flow_act,
cmd_in, inlen, dst_num);
if (IS_ERR(handler)) {
@@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
else
*dest_id = mqp->raw_packet_qp.rq.tirn;
*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
+ !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
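
Two independent fixes in this fs.c pair of hunks: raw_fs_rule_add() now passes NULL rather than a pointer to an unused destination array when dst_num is zero, and get_dests() no longer forces a port destination on egress/RDMA_TX rules that carry the DROP flag, since a drop rule must not have a forward destination. A compact sketch of the corrected decision (the enum and flag values are hypothetical):

enum ns_type { NS_EGRESS, NS_RDMA_TX, NS_OTHER };

#define FLOW_FLAG_DROP 0x2u

static int wants_port_dest(enum ns_type ns, unsigned int flags)
{
	/* drop rules carry no forward destination, so the port
	 * destination is only implied when DROP is absent */
	return (ns == NS_EGRESS || ns == NS_RDMA_TX) &&
	       !(flags & FLOW_FLAG_DROP);
}
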
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index db5de720bb12..b25e0b33a11a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -29,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- int num_ports = mlx5_eswitch_get_total_vports(dev);
+ u32 num_ports = mlx5_eswitch_get_total_vports(dev);
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
int vport_index;
@@ -110,7 +110,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
- u16 port)
+ u32 port)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
struct mlx5_eswitch_rep *rep;
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index ce1dcb105dbd..9c55e5c528b4 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -16,7 +16,7 @@ int mlx5r_rep_init(void);
void mlx5r_rep_cleanup(void);
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
- u16 port);
+ u32 port);
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
@@ -25,7 +25,7 @@ static inline void mlx5r_rep_cleanup(void) {}
static inline
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
- u16 port)
+ u32 port)
{
return NULL;
}
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 46b2d370fb3f..f2f62875d072 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -48,7 +48,7 @@ static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
}
}
-int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -91,7 +91,7 @@ static inline enum port_state_policy net_to_mlx_policy(int policy)
}
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
- u8 port, int state)
+ u32 port, int state)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -119,7 +119,7 @@ out:
}
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_stats *stats)
+ u32 port, struct ifla_vf_stats *stats)
{
int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
struct mlx5_core_dev *mdev;
@@ -149,7 +149,8 @@ ex:
return err;
}
-static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+static int set_vf_node_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -172,7 +173,8 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
return err;
}
-static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+static int set_vf_port_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
@@ -195,7 +197,7 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
return err;
}
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
u64 guid, int type)
{
if (type == IFLA_VF_IB_NODE_GUID)
@@ -206,7 +208,7 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
return -EINVAL;
}
-int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid)
{
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 652c6ccf1881..ec242a5a17a3 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -42,7 +42,7 @@ enum {
MLX5_IB_VENDOR_CLASS2 = 0xa
};
-static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
+static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u32 port_num,
struct ib_mad *in_mad)
{
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
@@ -52,7 +52,7 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
}
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
- int ignore_bkey, u8 port, const struct ib_wc *in_wc,
+ int ignore_bkey, u32 port, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const void *in_mad,
void *response_mad)
{
@@ -147,12 +147,12 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct mlx5_core_dev *mdev;
bool native_port = true;
- u8 mdev_port_num;
+ u32 mdev_port_num;
void *out_cnt;
int err;
@@ -216,7 +216,7 @@ done:
return err;
}
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
@@ -444,7 +444,7 @@ out:
return err;
}
-int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
struct ib_smp *in_mad = NULL;
@@ -473,7 +473,7 @@ out:
return err;
}
-int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
@@ -513,7 +513,7 @@ out:
return err;
}
-int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 7a7f6ccd02a5..6d1dd09a4388 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -34,6 +34,7 @@
#include "ib_rep.h"
#include "cmd.h"
#include "devx.h"
+#include "dm.h"
#include "fs.h"
#include "srq.h"
#include "qp.h"
@@ -42,6 +43,7 @@
#include "counters.h"
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>
@@ -100,7 +102,7 @@ mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
}
static enum rdma_link_layer
-mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
+mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(device);
int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
@@ -109,7 +111,7 @@ mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
}
static int get_port_state(struct ib_device *ibdev,
- u8 port_num,
+ u32 port_num,
enum ib_port_state *state)
{
struct ib_port_attr attr;
@@ -124,7 +126,7 @@ static int get_port_state(struct ib_device *ibdev,
static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
struct net_device *ndev,
- u8 *port_num)
+ u32 *port_num)
{
struct net_device *rep_ndev;
struct mlx5_ib_port *port;
@@ -154,7 +156,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
{
struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- u8 port_num = roce->native_port_num;
+ u32 port_num = roce->native_port_num;
struct mlx5_core_dev *mdev;
struct mlx5_ib_dev *ibdev;
@@ -233,7 +235,7 @@ done:
}
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
@@ -261,8 +263,8 @@ out:
}
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
- u8 ib_port_num,
- u8 *native_port_num)
+ u32 ib_port_num,
+ u32 *native_port_num)
{
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
ib_port_num);
@@ -296,7 +298,7 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
return mdev;
}
-void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
{
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
port_num);
@@ -452,7 +454,7 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
active_width);
}
-static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -462,7 +464,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
enum ib_mtu ndev_ib_mtu;
bool put_mdev = true;
u32 eth_prot_oper;
- u8 mdev_port_num;
+ u32 mdev_port_num;
bool ext;
int err;
@@ -498,7 +500,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width, ext);
- if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
+ if (!dev->is_rep && dev->mdev->roce.roce_en) {
u16 qkey_viol_cntr;
props->port_cap_flags |= IB_PORT_CM_SUP;
@@ -549,19 +551,19 @@ out:
return err;
}
-static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
+static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ enum ib_gid_type gid_type;
u16 vlan_id = 0xffff;
u8 roce_version = 0;
u8 roce_l3_type = 0;
u8 mac[ETH_ALEN];
int ret;
+ gid_type = attr->gid_type;
if (gid) {
- gid_type = attr->gid_type;
ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
if (ret)
return ret;
@@ -573,7 +575,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
break;
case IB_GID_TYPE_ROCE_UDP_ENCAP:
roce_version = MLX5_ROCE_VERSION_2;
- if (ipv6_addr_v4mapped((void *)gid))
+ if (gid && ipv6_addr_v4mapped((void *)gid))
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
else
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
@@ -600,7 +602,7 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, NULL, NULL);
+ attr->index, NULL, attr);
}
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
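
The set_roce_addr() change above tightens the GID deletion path: attr appears to be valid on both add and delete (the GID table keeps the attribute for the slot being cleared), while gid may be NULL on delete, so gid_type is now read unconditionally from attr and only the gid-derived IPv4-mapped test stays guarded; mlx5_ib_del_gid correspondingly passes attr instead of NULL. A sketch of the resulting invariant, helper name hypothetical:

	static int set_addr_sketch(const union ib_gid *gid,
				   const struct ib_gid_attr *attr)
	{
		enum ib_gid_type type = attr->gid_type;	/* attr is never NULL */
		bool v4 = gid && ipv6_addr_v4mapped((const void *)gid);

		return program_hw_sketch(type, v4);	/* hypothetical helper */
	}
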
@@ -1267,7 +1269,7 @@ static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
return 0;
}
-static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1335,7 +1337,7 @@ out:
return err;
}
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
unsigned int count;
@@ -1380,13 +1382,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
return ret;
}
-static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
return mlx5_query_port_roce(ibdev, port, props);
}
-static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
/* Default special Pkey for representor device port as per the
@@ -1396,7 +1398,7 @@ static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return 0;
}
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1415,13 +1417,13 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev;
bool put_mdev = true;
- u8 mdev_port_num;
+ u32 mdev_port_num;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
@@ -1442,7 +1444,7 @@ static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
return err;
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
switch (mlx5_get_vport_access_method(ibdev)) {
@@ -1486,12 +1488,12 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
return err;
}
-static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
+static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
struct mlx5_core_dev *mdev;
- u8 mdev_port_num;
+ u32 mdev_port_num;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@@ -1520,7 +1522,7 @@ out:
return err;
}
-static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1929,7 +1931,7 @@ uar_done:
print_lib_caps(dev, context->lib_caps);
if (mlx5_ib_lag_should_assign_affinity(dev)) {
- u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
atomic_add_return(
@@ -2087,14 +2089,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
struct mlx5_var_table *var_table = &dev->var_table;
- struct mlx5_ib_dm *mdm;
switch (mentry->mmap_flag) {
case MLX5_IB_MMAP_TYPE_MEMIC:
- mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
- mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
- mdm->size);
- kfree(mdm);
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ mlx5_ib_dm_mmap_free(dev, mentry);
break;
case MLX5_IB_MMAP_TYPE_VAR:
mutex_lock(&var_table->bitmap_lock);
@@ -2219,19 +2218,6 @@ free_bfreg:
return err;
}
-static int add_dm_mmap_entry(struct ib_ucontext *context,
- struct mlx5_ib_dm *mdm,
- u64 address)
-{
- mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
- mdm->mentry.address = address;
- return rdma_user_mmap_entry_insert_range(
- context, &mdm->mentry.rdma_entry,
- mdm->size,
- MLX5_IB_MMAP_DEVICE_MEM << 16,
- (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
-}
-
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
unsigned long idx;
@@ -2333,206 +2319,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
- u32 type)
-{
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
- return -EOPNOTSUPP;
- break;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- if (!capable(CAP_SYS_RAWIO) ||
- !capable(CAP_NET_RAW))
- return -EPERM;
-
- if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
- return -EOPNOTSUPP;
- break;
- }
-
- return 0;
-}
-
-static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
- struct mlx5_ib_dm *dm,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
- u64 start_offset;
- u16 page_idx;
- int err;
- u64 address;
-
- dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-
- err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
- dm->size, attr->alignment);
- if (err)
- return err;
-
- address = dm->dev_addr & PAGE_MASK;
- err = add_dm_mmap_entry(ctx, dm, address);
- if (err)
- goto err_dealloc;
-
- page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- &page_idx,
- sizeof(page_idx));
- if (err)
- goto err_copy;
-
- start_offset = dm->dev_addr & ~PAGE_MASK;
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- &start_offset, sizeof(start_offset));
- if (err)
- goto err_copy;
-
- return 0;
-
-err_copy:
- rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-err_dealloc:
- mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-
- return err;
-}
-
-static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
- struct mlx5_ib_dm *dm,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs,
- int type)
-{
- struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
- u64 act_size;
- int err;
-
- /* Allocation size must a multiple of the basic block size
- * and a power of 2.
- */
- act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
- act_size = roundup_pow_of_two(act_size);
-
- dm->size = act_size;
- err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
- to_mucontext(ctx)->devx_uid, &dm->dev_addr,
- &dm->icm_dm.obj_id);
- if (err)
- return err;
-
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- &dm->dev_addr, sizeof(dm->dev_addr));
- if (err)
- mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
- to_mucontext(ctx)->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
-
- return err;
-}
-
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_dm *dm;
- enum mlx5_ib_uapi_dm_type type;
- int err;
-
- err = uverbs_get_const_default(&type, attrs,
- MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
- MLX5_IB_UAPI_DM_TYPE_MEMIC);
- if (err)
- return ERR_PTR(err);
-
- mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
- type, attr->length, attr->alignment);
-
- err = check_dm_type_support(to_mdev(ibdev), type);
- if (err)
- return ERR_PTR(err);
-
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return ERR_PTR(-ENOMEM);
-
- dm->type = type;
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- err = handle_alloc_dm_memic(context, dm,
- attr,
- attrs);
- break;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm,
- attr, attrs,
- MLX5_SW_ICM_TYPE_STEERING);
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm,
- attr, attrs,
- MLX5_SW_ICM_TYPE_HEADER_MODIFY);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
- if (err)
- goto err_free;
-
- return &dm->ibdm;
-
-err_free:
- kfree(dm);
- return ERR_PTR(err);
-}
-
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
- &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
- struct mlx5_ib_dm *dm = to_mdm(ibdm);
- int ret;
-
- switch (dm->type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
- return 0;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
- dm->size, ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
- if (ret)
- return ret;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
- dm->size, ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
- if (ret)
- return ret;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- kfree(dm);
-
- return 0;
-}
-
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx5_ib_pd *pd = to_mpd(ibpd);
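
The block removed above (type checks, MEMIC and SW-ICM allocation, dealloc) is the device-memory code this series consolidates into dm.c, which is why main.c now includes "dm.h" and routes MEMIC mmap teardown through mlx5_ib_dm_mmap_free(). The deleted add_dm_mmap_entry() is still a useful illustration of the rdma mmap-entry API: register a range keyed by a 16-bit command in the high pgoff bits, and let userspace mmap() by the returned offset. Rough shape, with an illustrative command constant:

	struct example_mmap_entry {
		struct rdma_user_mmap_entry rdma_entry;
		u64 address;	/* driver-private, as in mlx5_user_mmap_entry */
	};

	static int insert_entry_sketch(struct ib_ucontext *ctx,
				       struct example_mmap_entry *e,
				       size_t size, u64 address)
	{
		e->address = address;
		return rdma_user_mmap_entry_insert_range(ctx, &e->rdma_entry,
				size,
				EXAMPLE_CMD << 16,			/* range start */
				(EXAMPLE_CMD << 16) + (1UL << 16) - 1);	/* range end */
	}
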
@@ -2779,7 +2565,7 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
- u8 port = (eqe->data.port.port >> 4) & 0xf;
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
@@ -2795,7 +2581,7 @@ static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe
static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
- u8 port = (eqe->data.port.port >> 4) & 0xf;
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
ibev->element.port_num = port;
@@ -3152,7 +2938,7 @@ static u32 get_core_cap_flags(struct ib_device *ibdev,
return ret;
}
-static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -3180,7 +2966,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -3252,7 +3038,7 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
int err;
@@ -3266,7 +3052,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
@@ -3300,7 +3086,7 @@ static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
+static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
{
@@ -3352,7 +3138,7 @@ static const struct file_operations fops_delay_drop_timeout = {
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
- u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
struct mlx5_ib_port *port = &ibdev->port[port_num];
int comps;
int err;
@@ -3398,7 +3184,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
- mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+ mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
/* Log an error, still needed to cleanup the pointers and add
* it back to the list.
*/
@@ -3412,14 +3198,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
- u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
int err;
lockdep_assert_held(&mlx5_ib_multiport_mutex);
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
- mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
+ mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
port_num + 1);
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
return false;
@@ -3455,12 +3241,12 @@ unbind:
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
- int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
port_num + 1);
struct mlx5_ib_multiport_info *mpi;
int err;
- int i;
+ u32 i;
if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
return 0;
@@ -3523,10 +3309,10 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
- int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
port_num + 1);
- int i;
+ u32 i;
if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
return;
@@ -3539,7 +3325,8 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
kfree(dev->port[i].mp.mpi);
dev->port[i].mp.mpi = NULL;
} else {
- mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
+ i + 1);
mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
}
}
@@ -3816,20 +3603,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
- mlx5_ib_dm,
- UVERBS_OBJECT_DM,
- UVERBS_METHOD_DM_ALLOC,
- UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_MANDATORY),
- UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- UVERBS_ATTR_TYPE(u16),
- UA_OPTIONAL),
- UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
- enum mlx5_ib_uapi_dm_type,
- UA_OPTIONAL));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_flow_action,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
@@ -3851,10 +3624,10 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
- UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
@@ -3891,8 +3664,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
- mlx5_ib_internal_fill_odp_caps(dev);
-
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -4032,12 +3803,6 @@ static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
};
-static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
- .alloc_dm = mlx5_ib_alloc_dm,
- .dealloc_dm = mlx5_ib_dealloc_dm,
- .reg_dm_mr = mlx5_ib_reg_dm_mr,
-};
-
static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -4160,7 +3925,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num = 0;
+ u32 port_num = 0;
int err;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
@@ -4173,7 +3938,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
/* Register only for native ports */
err = mlx5_add_netdev_notifier(dev, port_num);
- if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
+ if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
/*
* We don't enable ETH interface for
* 1. IB representors
@@ -4197,7 +3962,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
+ u32 port_num;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -4710,7 +4475,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;
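
Two details worth calling out in the main.c hunks above. First, with port_num widened to u32, the debug prints in the multiport bind/unbind paths switch from %d to %u; handing an unsigned value to %d trips format checkers and is undefined for values above INT_MAX. In miniature:

	static void log_unbind_sketch(u32 port_num)
	{
		pr_debug("unaffiliated port %u\n", port_num + 1);
	}

Second, callers move from mlx5_is_roce_enabled() to mlx5_is_roce_init_enabled(), which appears to separate the init-time RoCE setting used for profile selection from runtime state; the query-port path now reads dev->mdev->roce.roce_en directly instead.
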
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 88cc26e008fc..e9a3f34a30b8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -166,6 +166,7 @@ enum mlx5_ib_mmap_type {
MLX5_IB_MMAP_TYPE_VAR = 2,
MLX5_IB_MMAP_TYPE_UAR_WC = 3,
MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};
struct mlx5_bfreg_info {
@@ -406,7 +407,7 @@ struct mlx5_ib_qp_base {
struct mlx5_ib_qp_trans {
struct mlx5_ib_qp_base base;
u16 xrcdn;
- u8 alt_port;
+ u32 alt_port;
u8 atomic_rd_en;
u8 resp_depth;
};
@@ -453,7 +454,7 @@ struct mlx5_ib_dct {
struct mlx5_ib_gsi_qp {
struct ib_qp *rx_qp;
- u8 port_num;
+ u32 port_num;
struct ib_qp_cap cap;
struct ib_cq *cq;
struct mlx5_ib_gsi_wr *outstanding_wrs;
@@ -490,7 +491,7 @@ struct mlx5_ib_qp {
struct mutex mutex;
/* cached variant of create_flags from struct ib_qp_init_attr */
u32 flags;
- u8 port;
+ u32 port;
u8 state;
int max_inline_data;
struct mlx5_bf bf;
@@ -547,11 +548,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
return container_of(wr, struct mlx5_umr_wr, wr);
}
-struct mlx5_shared_mr_info {
- int mr_id;
- struct ib_umem *umem;
-};
-
enum mlx5_ib_cq_pr_flags {
MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
};
@@ -623,20 +619,6 @@ struct mlx5_user_mmap_entry {
u32 page_idx;
};
-struct mlx5_ib_dm {
- struct ib_dm ibdm;
- phys_addr_t dev_addr;
- u32 type;
- size_t size;
- union {
- struct {
- u32 obj_id;
- } icm_dm;
- /* other dm types specific params should be added here */
- };
- struct mlx5_user_mmap_entry mentry;
-};
-
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
@@ -654,47 +636,69 @@ struct mlx5_ib_dm {
atomic64_add(value, &((mr)->odp_stats.counter_name))
struct mlx5_ib_mr {
- struct ib_mr ibmr;
- void *descs;
- dma_addr_t desc_map;
- int ndescs;
- int data_length;
- int meta_ndescs;
- int meta_length;
- int max_descs;
- int desc_size;
- int access_mode;
- unsigned int page_shift;
- struct mlx5_core_mkey mmkey;
- struct ib_umem *umem;
- struct mlx5_shared_mr_info *smr_info;
- struct list_head list;
- struct mlx5_cache_ent *cache_ent;
- u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
- struct mlx5_core_sig_ctx *sig;
- void *descs_alloc;
- int access_flags; /* Needed for rereg MR */
-
- struct mlx5_ib_mr *parent;
- /* Needed for IB_MR_TYPE_INTEGRITY */
- struct mlx5_ib_mr *pi_mr;
- struct mlx5_ib_mr *klm_mr;
- struct mlx5_ib_mr *mtt_mr;
- u64 data_iova;
- u64 pi_iova;
-
- /* For ODP and implicit */
- struct xarray implicit_children;
+ struct ib_mr ibmr;
+ struct mlx5_core_mkey mmkey;
+
+ /* User MR data */
+ struct mlx5_cache_ent *cache_ent;
+ struct ib_umem *umem;
+
+ /* This is zero'd when the MR is allocated */
union {
- struct list_head elm;
- struct work_struct work;
- } odp_destroy;
- struct ib_odp_counters odp_stats;
- bool is_odp_implicit;
+ /* Used only while the MR is in the cache */
+ struct {
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ struct mlx5_async_work cb_work;
+ /* Cache list element */
+ struct list_head list;
+ };
- struct mlx5_async_work cb_work;
+ /* Used only by kernel MRs (umem == NULL) */
+ struct {
+ void *descs;
+ void *descs_alloc;
+ dma_addr_t desc_map;
+ int max_descs;
+ int ndescs;
+ int desc_size;
+ int access_mode;
+
+ /* For Kernel IB_MR_TYPE_INTEGRITY */
+ struct mlx5_core_sig_ctx *sig;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr *klm_mr;
+ struct mlx5_ib_mr *mtt_mr;
+ u64 data_iova;
+ u64 pi_iova;
+ int meta_ndescs;
+ int meta_length;
+ int data_length;
+ };
+
+ /* Used only by User MRs (umem != NULL) */
+ struct {
+ unsigned int page_shift;
+ /* Current access_flags */
+ int access_flags;
+
+ /* For User ODP */
+ struct mlx5_ib_mr *parent;
+ struct xarray implicit_children;
+ union {
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+ };
+ };
};
+/* Zero the fields in the mr that are variant depending on usage */
+static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
+{
+ memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
+}
+
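
mlx5_clear_mr() above relies on the classic offsetof tail-zeroing idiom: members are laid out in declaration order, so a single memset from a marker member (here the union starting at "out") to the end of the struct wipes every role-specific field while leaving the always-valid header intact. The same idiom in a self-contained sketch:

	#include <stddef.h>
	#include <string.h>

	struct example_mr {
		long stable_a;		/* preserved across reuse */
		long stable_b;		/* preserved across reuse */
		int variant_start;	/* first member to clear */
		int variant_more[8];
	};

	static void clear_variant(struct example_mr *m)
	{
		memset(&m->variant_start, 0,
		       sizeof(*m) - offsetof(struct example_mr, variant_start));
	}
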
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
@@ -822,7 +826,7 @@ struct mlx5_roce {
atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
- u8 native_port_num;
+ u32 native_port_num;
};
struct mlx5_ib_port {
@@ -837,7 +841,7 @@ struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
- u8 port_num;
+ u32 port_num;
};
enum mlx5_ib_dbg_cc_types {
@@ -1063,6 +1067,7 @@ struct mlx5_ib_dev {
struct mutex slow_path_mutex;
struct ib_odp_caps odp_caps;
u64 odp_max_size;
+ struct mutex odp_eq_mutex;
struct mlx5_ib_pf_eq odp_pf_eq;
struct xarray odp_mkeys;
@@ -1170,11 +1175,6 @@ static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
return container_of(msrq, struct mlx5_ib_srq, msrq);
}
-static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
-{
- return container_of(ibdm, struct mlx5_ib_dm, ibdm);
-}
-
static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mlx5_ib_mr, ibmr);
@@ -1268,8 +1268,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
-void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
-void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1285,7 +1284,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
int data_sg_nents, unsigned int *data_sg_offset,
struct scatterlist *meta_sg, int meta_sg_nents,
unsigned int *meta_sg_offset);
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -1300,13 +1299,13 @@ int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
-int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey);
-int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
-int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
@@ -1317,8 +1316,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
unsigned int entry, int access_flags);
-void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -1332,18 +1329,13 @@ int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
@@ -1357,12 +1349,12 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pf_eq *eq)
{
- return;
+ return 0;
}
-
-static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
@@ -1397,22 +1389,22 @@ int __mlx5_ib_add(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile);
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_info *info);
+ u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
- u8 port, int state);
+ u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
- u8 port, struct ifla_vf_stats *stats);
-int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ u32 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
u64 guid, int type);
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
const struct ib_gid_attr *attr);
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
-void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
@@ -1435,10 +1427,10 @@ void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
- u8 ib_port_num,
- u8 *native_port_num);
+ u32 ib_port_num,
+ u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
- u8 port_num);
+ u32 port_num);
extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
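
The mlx5_ib.h reorganization above is worth a second look: struct mlx5_ib_mr now overlays its cache-only, kernel-MR-only and user-MR-only fields in one anonymous union, since the three roles are mutually exclusive over the MR's lifetime. That both shrinks the struct and makes cross-role field access easier to spot in review. The shape in miniature:

	struct example_obj {
		int always_valid;		/* common to every role */
		union {
			struct {		/* role A: sitting in the cache */
				int cache_cookie;
			};
			struct {		/* role B: kernel-owned MR */
				void *descs;
			};
			struct {		/* role C: user-owned MR */
				unsigned int page_shift;
			};
		};
	};
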
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index db05b0e0a8d7..4388afeff251 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
+#include "dm.h"
#include "mlx5_ib.h"
/*
@@ -119,8 +120,6 @@ mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
create_mkey_callback, context);
}
-static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
@@ -590,6 +589,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
ent->available_mrs--;
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
+
+ mlx5_clear_mr(mr);
}
mr->access_flags = access_flags;
return mr;
@@ -615,42 +616,20 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
ent->available_mrs--;
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
- break;
+ mlx5_clear_mr(mr);
+ return mr;
}
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
}
-
- if (!mr)
- req_ent->miss++;
-
- return mr;
-}
-
-static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
-{
- struct mlx5_cache_ent *ent = mr->cache_ent;
-
- mr->cache_ent = NULL;
- spin_lock_irq(&ent->lock);
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
+ req_ent->miss++;
+ return NULL;
}
-void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
- if (!ent)
- return;
-
- if (mlx5_mr_cache_invalidate(mr)) {
- detach_mr_from_cache(mr);
- destroy_mkey(dev, mr);
- kfree(mr);
- return;
- }
-
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;
@@ -993,8 +972,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
- mr->access_flags = access_flags;
- mr->desc_size = sizeof(struct mlx5_mtt);
mr->mmkey.iova = iova;
mr->mmkey.size = umem->length;
mr->mmkey.pd = to_mpd(pd)->pdn;
@@ -1028,7 +1005,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
*/
might_sleep();
- gfp_mask |= __GFP_ZERO;
+ gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
/*
* If the system already has a suitable high order page then just use
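
The gfp change above adds __GFP_NORETRY so the attempt at a large physically-contiguous translation table fails fast instead of invoking heavy reclaim; the function's existing fallback path then retries with a smaller request. The policy in a sketch:

	/* Opportunistic high-order allocation: give up quickly and quietly,
	 * then settle for a smaller contiguous chunk. Sizes illustrative. */
	static void *try_big_then_small(size_t big, size_t small, gfp_t base)
	{
		void *p = (void *)__get_free_pages(base | __GFP_NORETRY |
						   __GFP_NOWARN,
						   get_order(big));
		if (p)
			return p;
		return (void *)__get_free_pages(base, get_order(small));
	}
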
@@ -1505,7 +1482,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
*/
err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
if (err) {
- dereg_mr(dev, mr);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
}
@@ -1524,6 +1501,9 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
return ERR_PTR(-EOPNOTSUPP);
+ err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
+ if (err)
+ return ERR_PTR(err);
if (!start && length == U64_MAX) {
if (iova != 0)
return ERR_PTR(-EINVAL);
@@ -1562,7 +1542,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
err_dereg_mr:
- dereg_mr(dev, mr);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
@@ -1659,19 +1639,19 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
return &mr->ibmr;
err_dereg_mr:
- dereg_mr(dev, mr);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ERR_PTR(err);
}
/**
- * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * revoke_mr - Fence all DMA on the MR
* @mr: The MR to fence
*
* Upon return the NIC will not be doing any DMA to the pages under the MR,
- * and any DMA inprogress will be completed. Failure of this function
+ * and any DMA in progress will be completed. Failure of this function
* indicates the HW has failed catastrophically.
*/
-int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
+static int revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_umr_wr umrwr = {};
@@ -1765,7 +1745,7 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
* with it. This ensure the change is atomic relative to any use of the
* MR.
*/
- err = mlx5_mr_cache_invalidate(mr);
+ err = revoke_mr(mr);
if (err)
return err;
@@ -1844,7 +1824,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* Only one active MR can refer to a umem at one time, revoke
* the old MR before assigning the umem to the new one.
*/
- err = mlx5_mr_cache_invalidate(mr);
+ err = revoke_mr(mr);
if (err)
return ERR_PTR(err);
umem = mr->umem;
@@ -1931,7 +1911,7 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (mr->descs) {
+ if (!mr->umem && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -1943,69 +1923,82 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
}
}
-static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
- if (mr->sig) {
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ int rc;
+
+ /*
+ * Any async use of the mr must hold the refcount, once the refcount
+ * goes to zero no other thread, such as ODP page faults, prefetch, any
+ * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
+ */
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ refcount_read(&mr->mmkey.usecount) != 0 &&
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
+ NULL, GFP_KERNEL);
+
+ if (mr->mtt_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->mtt_mr = NULL;
+ }
+ if (mr->klm_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->klm_mr = NULL;
+ }
+
if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_wire.psv_idx))
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
- xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
kfree(mr->sig);
mr->sig = NULL;
}
+ /* Stop DMA */
+ if (mr->cache_ent) {
+ if (revoke_mr(mr)) {
+ spin_lock_irq(&mr->cache_ent->lock);
+ mr->cache_ent->total_mrs--;
+ spin_unlock_irq(&mr->cache_ent->lock);
+ mr->cache_ent = NULL;
+ }
+ }
if (!mr->cache_ent) {
- destroy_mkey(dev, mr);
- mlx5_free_priv_descs(mr);
+ rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
+ if (rc)
+ return rc;
}
-}
-static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
- struct ib_umem *umem = mr->umem;
+ if (mr->umem) {
+ bool is_odp = is_odp_mr(mr);
- /* Stop all DMA */
- if (is_odp_mr(mr))
- mlx5_ib_fence_odp_mr(mr);
- else if (is_dmabuf_mr(mr))
- mlx5_ib_fence_dmabuf_mr(mr);
- else
- clean_mr(dev, mr);
-
- if (umem) {
- if (!is_odp_mr(mr))
- atomic_sub(ib_umem_num_pages(umem),
+ if (!is_odp)
+ atomic_sub(ib_umem_num_pages(mr->umem),
&dev->mdev->priv.reg_pages);
- ib_umem_release(umem);
+ ib_umem_release(mr->umem);
+ if (is_odp)
+ mlx5_ib_free_odp_mr(mr);
}
- if (mr->cache_ent)
+ if (mr->cache_ent) {
mlx5_mr_cache_free(dev, mr);
- else
+ } else {
+ mlx5_free_priv_descs(mr);
kfree(mr);
-}
-
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-{
- struct mlx5_ib_mr *mmr = to_mmr(ibmr);
-
- if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
- dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
- dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
}
-
- if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
- mlx5_ib_free_implicit_mr(mmr);
- return 0;
- }
-
- dereg_mr(to_mdev(ibmr->device), mmr);
-
return 0;
}
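
The consolidated mlx5_ib_dereg_mr() above encodes a strict teardown order: unpublish the mkey so no new lookups succeed, wait out in-flight ODP users via mlx5r_deref_wait_odp_mkey(), fence DMA (revoke_mr() or cache detach), and only then release the umem and free. It also uses xa_cmpxchg() rather than xa_erase() for the signature-MR registry, clearing the slot only if it still points at this MR. That conditional-unpublish idiom, in a sketch:

	static void unpublish_if_mine(struct xarray *xa, unsigned long key,
				      void *mine)
	{
		void *old = xa_cmpxchg(xa, key, mine, NULL, GFP_KERNEL);

		if (old != mine)	/* slot re-owned concurrently */
			pr_debug("entry not ours; leaving it in place\n");
	}
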
@@ -2177,10 +2170,10 @@ err_free_descs:
destroy_mkey(dev, mr);
mlx5_free_priv_descs(mr);
err_free_mtt_mr:
- dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
+ mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
mr->mtt_mr = NULL;
err_free_klm_mr:
- dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
+ mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
mr->klm_mr = NULL;
err_destroy_psv:
if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b103555b1f5d..782b2af8f211 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -181,64 +181,29 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
}
}
-static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
-{
- struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
-
- /* Ensure mlx5_ib_invalidate_range() will not touch the MR any more */
- mutex_lock(&odp->umem_mutex);
- if (odp->npages) {
- mlx5_mr_cache_invalidate(mr);
- ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),
- ib_umem_end(odp));
- WARN_ON(odp->npages);
- }
- odp->private = NULL;
- mutex_unlock(&odp->umem_mutex);
-
- if (!mr->cache_ent) {
- mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
- WARN_ON(mr->descs);
- }
-}
-
/*
* This must be called after the mr has been removed from implicit_children.
* NOTE: The MR does not necessarily have to be
* empty here, parallel page faults could have raced with the free process and
* added pages to it.
*/
-static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
+static void free_implicit_child_mr_work(struct work_struct *work)
{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
struct mlx5_ib_mr *imr = mr->parent;
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
mlx5r_deref_wait_odp_mkey(&mr->mmkey);
- if (need_imr_xlt) {
- mutex_lock(&odp_imr->umem_mutex);
- mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ATOMIC);
- mutex_unlock(&odp_imr->umem_mutex);
- }
-
- dma_fence_odp_mr(mr);
-
- mr->parent = NULL;
- mlx5_mr_cache_free(mr_to_mdev(mr), mr);
- ib_umem_odp_release(odp);
-}
-
-static void free_implicit_child_mr_work(struct work_struct *work)
-{
- struct mlx5_ib_mr *mr =
- container_of(work, struct mlx5_ib_mr, odp_destroy.work);
- struct mlx5_ib_mr *imr = mr->parent;
+ mutex_lock(&odp_imr->umem_mutex);
+ mlx5_ib_update_xlt(mr->parent, ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT,
+ 1, 0,
+ MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
- free_implicit_child_mr(mr, true);
mlx5r_deref_odp_mkey(&imr->mmkey);
}
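
The surviving free_implicit_child_mr_work() above keeps the usual deferred-destroy shape: the MR embeds a work_struct, the handler recovers the object with container_of(), updates the parent's XLT under the umem mutex, and then funnels into the common mlx5_ib_dereg_mr(). The embed-and-recover idiom in miniature, names illustrative:

	struct example_child {
		struct work_struct destroy_work;
		/* ... payload ... */
	};

	static void destroy_work_fn(struct work_struct *work)
	{
		struct example_child *c =
			container_of(work, struct example_child, destroy_work);

		kfree(c);	/* runs from the workqueue, outside caller locks */
	}

	/* Arm once, queue when the last reference drops:
	 *	INIT_WORK(&c->destroy_work, destroy_work_fn);
	 *	queue_work(system_unbound_wq, &c->destroy_work);
	 */
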
@@ -352,7 +317,7 @@ const struct mmu_interval_notifier_ops mlx5_mn_ops = {
.invalidate = mlx5_ib_invalidate_range,
};
-void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
struct ib_odp_caps *caps = &dev->odp_caps;
@@ -455,8 +420,10 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
ret = mr = mlx5_mr_cache_alloc(
mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
- if (IS_ERR(mr))
- goto out_umem;
+ if (IS_ERR(mr)) {
+ ib_umem_odp_release(odp);
+ return mr;
+ }
mr->ibmr.pd = imr->ibmr.pd;
mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
@@ -506,9 +473,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
out_lock:
xa_unlock(&imr->implicit_children);
out_mr:
- mlx5_mr_cache_free(mr_to_mdev(imr), mr);
-out_umem:
- ib_umem_odp_release(odp);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
return ret;
}
@@ -531,8 +496,8 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
if (IS_ERR(imr)) {
- err = PTR_ERR(imr);
- goto out_umem;
+ ib_umem_odp_release(umem_odp);
+ return imr;
}
imr->ibmr.pd = &pd->ibpd;
@@ -562,93 +527,22 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
return imr;
out_mr:
mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
- mlx5_mr_cache_free(dev, imr);
-out_umem:
- ib_umem_odp_release(umem_odp);
+ mlx5_ib_dereg_mr(&imr->ibmr, NULL);
return ERR_PTR(err);
}
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{
- struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
- struct mlx5_ib_dev *dev = mr_to_mdev(imr);
struct mlx5_ib_mr *mtt;
unsigned long idx;
- xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
- /*
- * All work on the prefetch list must be completed, xa_erase() prevented
- * new work from being created.
- */
- mlx5r_deref_wait_odp_mkey(&imr->mmkey);
- /*
- * At this point it is forbidden for any other thread to enter
- * pagefault_mr() on this imr. It is already forbidden to call
- * pagefault_mr() on an implicit child. Due to this additions to
- * implicit_children are prevented.
- * In addition, any new call to destroy_unused_implicit_child_mr()
- * may return immediately.
- */
-
/*
- * Fence the imr before we destroy the children. This allows us to
- * skip updating the XLT of the imr during destroy of the child mkey
- * the imr points to.
+ * If this is an implicit MR it is already invalidated so we can just
+ * delete the children mkeys.
*/
- mlx5_mr_cache_invalidate(imr);
-
- xa_for_each(&imr->implicit_children, idx, mtt) {
- xa_erase(&imr->implicit_children, idx);
- free_implicit_child_mr(mtt, false);
- }
-
- mlx5_mr_cache_free(dev, imr);
- ib_umem_odp_release(odp_imr);
-}
-
-/**
- * mlx5_ib_fence_odp_mr - Stop all access to the ODP MR
- * @mr: to fence
- *
- * On return no parallel threads will be touching this MR and no DMA will be
- * active.
- */
-void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
-{
- /* Prevent new page faults and prefetch requests from succeeding */
- xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
-
- /* Wait for all running page-fault handlers to finish. */
- mlx5r_deref_wait_odp_mkey(&mr->mmkey);
-
- dma_fence_odp_mr(mr);
-}
-
-/**
- * mlx5_ib_fence_dmabuf_mr - Stop all access to the dmabuf MR
- * @mr: to fence
- *
- * On return no parallel threads will be touching this MR and no DMA will be
- * active.
- */
-void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr)
-{
- struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
-
- /* Prevent new page faults and prefetch requests from succeeding */
- xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
-
- mlx5r_deref_wait_odp_mkey(&mr->mmkey);
-
- dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
- mlx5_mr_cache_invalidate(mr);
- umem_dmabuf->private = NULL;
- ib_umem_dmabuf_unmap_pages(umem_dmabuf);
- dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
-
- if (!mr->cache_ent) {
- mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
- WARN_ON(mr->descs);
+ xa_for_each(&mr->implicit_children, idx, mtt) {
+ xa_erase(&mr->implicit_children, idx);
+ mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
}
}
@@ -1637,20 +1531,24 @@ enum {
MLX5_IB_NUM_PF_DRAIN = 64,
};
-static int
-mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
struct mlx5_eq_param param = {};
- int err;
+ int err = 0;
+ mutex_lock(&dev->odp_eq_mutex);
+ if (eq->core)
+ goto unlock;
INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
spin_lock_init(&eq->lock);
eq->dev = dev;
eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
sizeof(struct mlx5_pagefault));
- if (!eq->pool)
- return -ENOMEM;
+ if (!eq->pool) {
+ err = -ENOMEM;
+ goto unlock;
+ }
eq->wq = alloc_workqueue("mlx5_ib_page_fault",
WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
@@ -1661,7 +1559,7 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
}
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
- param = (struct mlx5_eq_param) {
+ param = (struct mlx5_eq_param){
.irq_index = 0,
.nent = MLX5_IB_NUM_PF_EQE,
};
@@ -1677,21 +1575,27 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
goto err_eq;
}
+ mutex_unlock(&dev->odp_eq_mutex);
return 0;
err_eq:
mlx5_eq_destroy_generic(dev->mdev, eq->core);
err_wq:
+ eq->core = NULL;
destroy_workqueue(eq->wq);
err_mempool:
mempool_destroy(eq->pool);
+unlock:
+ mutex_unlock(&dev->odp_eq_mutex);
return err;
}
static int
-mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
int err;
+ if (!eq->core)
+ return 0;
mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
cancel_work_sync(&eq->work);
@@ -1735,6 +1639,8 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret = 0;
+ internal_fill_odp_caps(dev);
+
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return ret;
@@ -1748,8 +1654,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
}
}
- ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
-
+ mutex_init(&dev->odp_eq_mutex);
return ret;
}
@@ -1758,7 +1663,7 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return;
- mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
+ mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
}
int mlx5_ib_odp_init(void)
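
The odp.c change above converts page-fault EQ setup from unconditional (at profile init) to lazy: mlx5r_odp_create_eq() is now called on first ODP MR registration, serialized by odp_eq_mutex, with eq->core doubling as the "already created" marker (note the err_wq path resets it to NULL so a failed attempt stays retryable). The pattern in a self-contained sketch, backend call hypothetical:

	struct lazy_eq {
		struct mutex lock;	/* mutex_init() once at probe time */
		void *core;		/* NULL until first successful create */
	};

	static int lazy_eq_get(struct lazy_eq *eq)
	{
		int err = 0;

		mutex_lock(&eq->lock);
		if (eq->core)			/* fast path: already built */
			goto unlock;
		eq->core = create_eq_backend();	/* hypothetical backend call */
		if (!eq->core)
			err = -ENOMEM;		/* core stays NULL: retryable */
	unlock:
		mutex_unlock(&eq->lock);
		return err;
	}
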
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f5a52a6fae43..9282eb10bfae 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -67,7 +67,7 @@ struct mlx5_modify_raw_qp_param {
struct mlx5_rate_limit rl;
u8 rq_q_ctr_id;
- u16 port;
+ u32 port;
};
static void get_cqs(enum ib_qp_type qp_type,
@@ -3146,6 +3146,19 @@ enum {
MLX5_PATH_FLAG_COUNTER = 1 << 2,
};
+static int mlx5_to_ib_rate_map(u8 rate)
+{
+ static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
+ IB_RATE_25_GBPS, IB_RATE_100_GBPS,
+ IB_RATE_200_GBPS, IB_RATE_50_GBPS,
+ IB_RATE_400_GBPS };
+
+ if (rate < ARRAY_SIZE(rates))
+ return rates[rate];
+
+ return rate - MLX5_STAT_RATE_OFFSET;
+}
+
static int ib_to_mlx5_rate_map(u8 rate)
{
switch (rate) {
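
The new mlx5_to_ib_rate_map() above is the usual table-plus-offset decoder: the first few hardware codes map through a lookup table of extended rates, and anything past the table is a linear offset, matching the old open-coded "static_rate - 5". The shape in miniature, table values illustrative:

	static int decode_rate_sketch(unsigned int hw_rate)
	{
		static const int table[] = { 0, 56, 25, 100, 200, 50, 400 };

		if (hw_rate < ARRAY_SIZE(table))
			return table[hw_rate];
		return hw_rate - 5;	/* offset region, as in the old code */
	}
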
@@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
static_rate = MLX5_GET(ads, path, stat_rate);
- rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
+ rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
if (MLX5_GET(ads, path, grh) ||
ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index 16145fda68d0..c0ddf7b3c6e2 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -7,6 +7,8 @@
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#define UVERBS_MODULE_NAME mlx5_ib
@@ -23,6 +25,174 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
&mpd->pdn, sizeof(mpd->pdn));
}
+static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+ bool sw_owner_supp;
+ u64 icm_rx;
+ u64 icm_tx;
+ int err;
+
+ sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ icm_rx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_rx);
+ icm_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_tx);
+ } else {
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_esw_vport_context_in, in, other_vport, true);
+
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
+ out);
+
+ if (err)
+ return err;
+
+ icm_rx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_rx);
+
+ icm_tx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_tx);
+ }
+
+ if (sw_owner_supp && icm_rx) {
+ info->vport_steering_icm_rx = icm_rx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
+ }
+
+ if (sw_owner_supp && icm_tx) {
+ info->vport_steering_icm_tx = icm_tx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
+ }
+
+ return 0;
+}
+
+static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ void *out;
+ int err;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ MLX5_SET(query_hca_cap_in, in, function_id, vport);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+out:
+ kfree(out);
+ return err;
+}
+
+static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_eswitch_rep *rep;
+ int err;
+
+ rep = dev->port[port_num - 1].rep;
+ if (!rep)
+ return -EOPNOTSUPP;
+
+ info->vport = rep->vport;
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ err = fill_vport_vhca_id(mdev, rep->vport, info);
+ if (err)
+ return err;
+ }
+
+ info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;
+
+ err = fill_vport_icm_addr(mdev, rep->vport, info);
+ if (err)
+ return err;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(mdev->priv.eswitch)) {
+ info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
+ mdev->priv.eswitch, rep->vport);
+ info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
+ }
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_uapi_query_port info = {};
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 port_num;
+ int ret;
+
+ if (uverbs_copy_from(&port_num, attrs,
+ MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
+ return -EFAULT;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!rdma_is_port_valid(&dev->ib_dev, port_num))
+ return -EINVAL;
+
+ if (mlx5_eswitch_mode(dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
+ ret = fill_switchdev_info(dev, port_num, &info);
+ if (ret)
+ return ret;
+ }
+
+ return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
+ sizeof(info));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_QUERY_PORT,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_QUERY_PORT,
+ UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
+ reg_c0),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_device,
+ UVERBS_OBJECT_DEVICE,
+ &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT));
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_PD_QUERY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
@@ -41,5 +211,8 @@ const struct uapi_definition mlx5_ib_std_types_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE(
UVERBS_OBJECT_PD,
&mlx5_ib_pd),
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_DEVICE,
+ &mlx5_ib_device),
{},
};
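From userspace, the optional outputs of MLX5_IB_METHOD_QUERY_PORT are discovered through the flags bitmask rather than the call's return code; uverbs_copy_to_struct_or_zero() lets older consumers receive just the prefix of the struct they know about, with the rest zeroed. A hedged consumer-side sketch (use_vport() and match_metadata() are placeholders; how the method is issued, e.g. via mlx5dv, is outside this patch):

struct mlx5_ib_uapi_query_port info;

/* ... method executed, info filled in by the kernel ... */

if (info.flags & MLX5_IB_UAPI_QUERY_PORT_VPORT)
	use_vport(info.vport);			/* placeholder consumer */
if (info.flags & MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0)
	match_metadata(info.reg_c0.value,	/* placeholder consumer */
		       info.reg_c0.mask);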
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index f051f4e06b53..3df1f5ff7932 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -91,7 +91,7 @@ static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
}
}
-enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port)
{
if (mthca_is_memfree(dev)) {
/* Handle old Arbel FW */
@@ -131,7 +131,7 @@ static u8 ib_rate_to_tavor(u8 static_rate)
}
}
-u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port)
{
u8 rate;
@@ -293,7 +293,7 @@ int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
struct mthca_ah *ah = to_mah(ibah);
struct mthca_dev *dev = to_mdev(ibah->device);
- u8 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
+ u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
/* Only implement for MAD and memfree ah for now. */
if (ah->type == MTHCA_AH_ON_HCA)
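The byte shift works because mthca packs the port number and PD into a single big-endian word in the address vector. Sketch of the layout, assuming the usual pack side:

/* port_pd layout: bits [31:24] = port, [23:0] = PD number (sketch). */
u32 port = 1, pd_num = 0x42;			/* example values */
__be32 port_pd = cpu_to_be32((port << 24) | (pd_num & 0xffffff));
u32 port_num = be32_to_cpu(port_pd) >> 24;	/* recovers 1 */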
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index a445160de3e1..a4a9d871d00e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -546,7 +546,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
int qpn,
- int port,
+ u32 port,
struct mthca_qp *qp,
struct ib_udata *udata);
void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
@@ -559,13 +559,13 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
struct ib_ud_header *header);
int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr);
int mthca_ah_grh_present(struct mthca_ah *ah);
-u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port);
-enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port);
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
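Every prototype hunk in this series follows one mechanical pattern: IB port-number parameters widen from u8 to u32 so the RDMA core can represent devices with more than 255 ports. Schematically (foo_query_port is a placeholder, not a real driver entry point):

-int foo_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props);
+int foo_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props);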
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 99aa8183a7f2..04252700790e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -162,7 +162,7 @@ static void node_desc_override(struct ib_device *dev,
}
static void forward_trap(struct mthca_dev *dev,
- u8 port_num,
+ u32 port_num,
const struct ib_mad *mad)
{
int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
@@ -196,7 +196,7 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1a3dd07f993b..522bb606120e 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -127,7 +127,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
}
static int mthca_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
+ u32 port, struct ib_port_attr *props)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -194,7 +194,7 @@ static int mthca_modify_device(struct ib_device *ibdev,
}
static int mthca_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
+ u32 port, int port_modify_mask,
struct ib_port_modify *props)
{
struct mthca_set_ib_param set_ib;
@@ -223,7 +223,7 @@ out:
}
static int mthca_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 *pkey)
+ u32 port, u16 index, u16 *pkey)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -251,7 +251,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
return err;
}
-static int mthca_query_gid(struct ib_device *ibdev, u8 port,
+static int mthca_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
@@ -1051,7 +1051,7 @@ out:
return err;
}
-static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 07cfc0934b17..69bba0ef4a5d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1370,7 +1370,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
enum ib_sig_type send_policy,
struct ib_qp_cap *cap,
int qpn,
- int port,
+ u32 port,
struct mthca_qp *qp,
struct ib_udata *udata)
{
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 699a8b719ed6..88c45928301f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -250,7 +250,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
}
int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 35cf2e2ff391..2626679df31d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -57,7 +57,7 @@ int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 9a834a9cca0e..4882b3156edb 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -77,12 +77,12 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
guid[7] = mac_addr[5];
}
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
-static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int ocrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 3acb5c10b155..58619ce64d0d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -54,7 +54,7 @@
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>
-int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index > 0)
return -EINVAL;
@@ -150,7 +150,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
}
int ocrdma_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
+ u32 port, struct ib_port_attr *props)
{
enum ib_port_state port_state;
struct ocrdma_dev *dev;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 425d554e7f3f..b1c5fad81603 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -53,13 +53,14 @@ int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
-int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int ocrdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
enum rdma_protocol_type
-ocrdma_query_protocol(struct ib_device *device, u8 port_num);
+ocrdma_query_protocol(struct ib_device *device, u32 port_num);
void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
-int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 8e7c069e1a2d..8334a9850220 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -53,7 +53,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define QEDR_WQ_MULTIPLIER_DFT (3)
-static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
enum ib_event_type type)
{
struct ib_event ibev;
@@ -66,7 +66,7 @@ static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
}
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -81,7 +81,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
-static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -100,7 +100,7 @@ static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index c4bc58736e48..1715fbe0719d 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
- &qp->iwarp_cm_flags))
+ &qp->iwarp_cm_flags)) {
+ rc = -ENODEV;
goto err; /* QP already being destroyed */
+ }
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
if (rc) {
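The bug class fixed here: jumping to a shared error label without first assigning the return variable lets a stale value escape to the caller. Minimal self-contained illustration (do_work() and cleanup() are placeholders):

static unsigned long flags;
int do_work(void);	/* placeholder */
void cleanup(void);	/* placeholder */

int example(void)
{
	int rc;

	if (test_and_set_bit(0, &flags)) {
		rc = -ENODEV;	/* the assignment this hunk adds */
		goto err;
	}
	rc = do_work();
	if (rc)
		goto err;
	return 0;
err:
	cleanup();
	return rc;
}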
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9ea542270ed4..fdc47ef7d861 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -72,7 +72,7 @@ static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
return ib_copy_to_udata(udata, src, min_len);
}
-int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
return -EINVAL;
@@ -81,7 +81,7 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
-int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *sgid)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
@@ -210,7 +210,8 @@ static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
}
}
-int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
{
struct qedr_dev *dev;
struct qed_rdma_port *rdma_port;
@@ -4483,7 +4484,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in,
struct ib_mad *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 2672c32bc2f7..34ad47515861 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -34,12 +34,13 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
-int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
-int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid);
-int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
@@ -92,11 +93,11 @@ int qedr_post_send(struct ib_qp *, const struct ib_send_wr *,
int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const struct ib_mad *in_mad,
struct ib_mad *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index);
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+int qedr_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
#endif
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ee211423058a..88497739029e 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -630,7 +630,7 @@ struct qib_pportdata {
u8 rx_pol_inv;
u8 hw_pidx; /* physical port index */
- u8 port; /* IB port number and index into dd->pports - 1 */
+ u32 port; /* IB port number and index into dd->pports - 1 */
u8 delay_mult;
@@ -1200,10 +1200,10 @@ static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
return container_of(ibp, struct qib_pportdata, ibport_data);
}
-static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u32 port)
{
struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+ u32 pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
WARN_ON(pidx >= dd->num_pports);
return &dd->pport[pidx].ibport_data;
@@ -1303,11 +1303,6 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
-static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
-{
- return ppd->sdma_descq_added == ppd->sdma_descq_removed;
-}
-
/* must be called under qib_sdma_lock */
static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
{
@@ -1364,27 +1359,6 @@ static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
}
-static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
-{
- const struct qib_devdata *dd = rcd->dd;
- u32 hdrqtail;
-
- if (dd->flags & QIB_NODMA_RTAIL) {
- __le32 *rhf_addr;
- u32 seq;
-
- rhf_addr = (__le32 *) rcd->rcvhdrq +
- rcd->head + dd->rhf_offset;
- seq = qib_hdrget_seq(rhf_addr);
- hdrqtail = rcd->head;
- if (seq == rcd->seq_cnt)
- hdrqtail++;
- } else
- hdrqtail = qib_get_rcvhdrtail(rcd);
-
- return hdrqtail;
-}
-
/*
* sysfs interface.
*/
@@ -1395,7 +1369,7 @@ extern const struct attribute_group qib_attr_group;
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
-int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj);
void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index f91f23e02283..cf652831d8e7 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -795,11 +795,4 @@ static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
{
return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
}
-
-static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
-{
- return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
- QLOGIC_IB_I_VERS_MASK;
-}
-
#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ff87a67dd7b7..c60e79d214a1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1758,7 +1758,8 @@ bail:
}
/**
- * unlock_exptid - unlock any expected TID entries context still had in use
+ * unlock_expected_tids - unlock any expected TID entries context still had
+ * in use
* @rcd: ctxt
*
* We don't actually update the chip here, because we do a bulk update
@@ -2247,7 +2248,7 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!iter_is_iovec(from) || !from->nr_segs || !pq)
return -EINVAL;
-
+
return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index e336d778e076..a0c5f3bdc324 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -427,79 +427,21 @@ bail:
return ret;
}
-static int remove_file(struct dentry *parent, char *name)
-{
- struct dentry *tmp;
- int ret;
-
- tmp = lookup_one_len(name, parent, strlen(name));
-
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto bail;
- }
-
- spin_lock(&tmp->d_lock);
- if (simple_positive(tmp)) {
- __d_drop(tmp);
- spin_unlock(&tmp->d_lock);
- simple_unlink(d_inode(parent), tmp);
- } else {
- spin_unlock(&tmp->d_lock);
- }
- dput(tmp);
-
- ret = 0;
-bail:
- /*
- * We don't expect clients to care about the return value, but
- * it's there if they need it.
- */
- return ret;
-}
-
static int remove_device_files(struct super_block *sb,
struct qib_devdata *dd)
{
- struct dentry *dir, *root;
+ struct dentry *dir;
char unit[10];
- int ret, i;
- root = dget(sb->s_root);
- inode_lock(d_inode(root));
snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_one_len(unit, root, strlen(unit));
+ dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit));
if (IS_ERR(dir)) {
- ret = PTR_ERR(dir);
pr_err("Lookup of %s failed\n", unit);
- goto bail;
+ return PTR_ERR(dir);
}
-
- inode_lock(d_inode(dir));
- remove_file(dir, "counters");
- remove_file(dir, "counter_names");
- remove_file(dir, "portcounter_names");
- for (i = 0; i < dd->num_pports; i++) {
- char fname[24];
-
- sprintf(fname, "port%dcounters", i + 1);
- remove_file(dir, fname);
- if (dd->flags & QIB_HAS_QSFP) {
- sprintf(fname, "qsfp%d", i + 1);
- remove_file(dir, fname);
- }
- }
- remove_file(dir, "flash");
- inode_unlock(d_inode(dir));
- ret = simple_rmdir(d_inode(root), dir);
- d_drop(dir);
- dput(dir);
-
-bail:
- inode_unlock(d_inode(root));
- dput(root);
- return ret;
+ simple_recursive_removal(dir, NULL);
+ return 0;
}
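The rewrite leans on two VFS helpers: lookup_one_len_unlocked(), which takes the parent inode lock internally (hence the dropped inode_lock()/inode_unlock() pairs), and simple_recursive_removal(), which unlinks the directory's contents and then the directory itself, replacing the per-file remove_file() loop. Net shape of the new path (sketch):

dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit));
if (IS_ERR(dir))
	return PTR_ERR(dir);
simple_recursive_removal(dir, NULL);	/* NULL: no per-dentry callback */
return 0;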
/*
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index b35e1174be22..a9b83bc13f4a 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2609,7 +2609,7 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
}
/**
- * qib_get_faststats - get word counters from chip before they overflow
+ * qib_get_6120_faststats - get word counters from chip before they overflow
* @t: contains a pointer to the qlogic_ib device qib_devdata
*
* This needs more work; in particular, decision on whether we really
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 229dcd6ead95..d1c0bc31869f 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2236,7 +2236,7 @@ static void qib_7220_tidtemplate(struct qib_devdata *dd)
}
/**
- * qib_init_7220_get_base_info - set chip-specific flags for user code
+ * qib_7220_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
* @kinfo: qib_base_info pointer
*
@@ -4411,7 +4411,7 @@ static void writescratch(struct qib_devdata *dd, u32 val)
#define VALID_TS_RD_REG_MASK 0xBF
/**
- * qib_7220_tempsense_read - read register of temp sensor via TWSI
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
* @dd: the qlogic_ib device
* @regnum: register to read from
*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9fe6ea75b45e..ab98b6a3ae1e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -791,28 +791,6 @@ static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
}
/**
- * qib_read_ureg - read virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns -1 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u64 qib_read_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
-
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(regno + (u64 __iomem *)(
- (dd->ureg_align * ctxt) + (dd->userbase ?
- (char __iomem *)dd->userbase :
- (char __iomem *)dd->kregbase + dd->uregbase)));
-}
-
-/**
* qib_write_ureg - write virtualized per-context register
* @dd: device
* @regno: register number
@@ -2513,7 +2491,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
}
/**
- * qib_7322_quiet_serdes - set serdes to txidle
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
* @ppd: the qlogic_ib device
* Called when driver is being unloaded
*/
@@ -3859,7 +3837,7 @@ static void qib_7322_tidtemplate(struct qib_devdata *dd)
}
/**
- * qib_init_7322_get_base_info - set chip-specific flags for user code
+ * qib_7322_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
* @kinfo: qib_base_info pointer
*
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 43c8ee1f46e0..b5a78576c48b 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1609,7 +1609,7 @@ bail:
}
/**
- * allocate eager buffers, both kernel and user contexts.
+ * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
* @rcd: the context we are setting up.
*
 * Allocate the eager TID buffers and program them into the chip.
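Several hunks in this series only touch kernel-doc: the identifier on the comment's opening line must match the function being documented, or the kernel-doc checks warn. Expected shape, using the function from this hunk:

/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up
 *
 * The name before the dash must equal the function name below.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd);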
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 44e2f813024a..ef02f2bfddb2 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -203,7 +203,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
struct qib_devdata *dd = dd_from_dev(ibdev);
@@ -2360,7 +2360,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
*
* This is called by the ib_mad module.
*/
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index ca39a029e4af..1974ceb9d405 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -125,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port)
+ enum ib_qp_type type, u32 port)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
@@ -136,7 +136,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
u16 qpt_mask = dd->qpn_mask;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
- unsigned n;
+ u32 n;
ret = type == IB_QPT_GSI;
n = 1 << (ret + 2 * (port - 1));
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 4f4a09c2dbcd..81b810d006c0 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -687,7 +687,6 @@ static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
- ret = 0;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 62c179fc764b..5e9e66f27064 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -728,7 +728,7 @@ const struct attribute_group qib_attr_group = {
.attrs = qib_attributes,
};
-int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
struct kobject *kobj)
{
struct qib_pportdata *ppd;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 8e0de265ad57..d17d034ecdfd 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1188,7 +1188,7 @@ full:
}
}
-static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
+static int qib_query_port(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
@@ -1273,7 +1273,7 @@ bail:
return ret;
}
-static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
+static int qib_shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
struct qib_devdata *dd = dd_from_dev(ibdev);
@@ -1342,7 +1342,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
struct rvt_qp *qp0;
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
- u8 port_num = ppd->port;
+ u32 port_num = ppd->port;
memset(&attr, 0, sizeof(attr));
attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index dc0e81f3b6f4..07548fac1d8e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -239,10 +239,10 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -273,7 +273,7 @@ void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_notify_qp_reset(struct rvt_qp *qp);
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port);
+ enum ib_qp_type type, u32 port);
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 1b63a491fa72..ff6a40e259d5 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -303,7 +303,7 @@ static struct notifier_block usnic_ib_inetaddr_notifier = {
};
/* End of inet section*/
-static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 3705c6b8b223..57d210ca855a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -270,7 +270,7 @@ static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -332,7 +332,7 @@ int usnic_ib_query_device(struct ib_device *ibdev,
return 0;
}
-int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
@@ -420,7 +420,7 @@ err_out:
return err;
}
-int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 11fe1ba6bbc9..6b82d0f2d184 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -37,16 +37,16 @@
#include "usnic_ib.h"
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
int usnic_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw);
-int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index de57f2fed743..763ddc6f25d1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -344,11 +344,6 @@ static inline enum ib_port_state pvrdma_port_state_to_ib(
return (enum ib_port_state)state;
}
-static inline int ib_port_cap_flags_to_pvrdma(int flags)
-{
- return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
-}
-
static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
return flags;
@@ -410,11 +405,6 @@ static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
return (enum pvrdma_qp_type)type;
}
-static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
-{
- return (enum ib_qp_type)type;
-}
-
static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
return (enum pvrdma_qp_state)state;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 4b6019e7de67..6bf2d2e47d07 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -121,7 +121,7 @@ static int pvrdma_init_device(struct pvrdma_dev *dev)
return 0;
}
-static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct pvrdma_dev *dev = to_vdev(ibdev);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 1d3bdd7bb51d..67769b715126 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -882,7 +882,7 @@ out:
}
/**
- * pvrdma_post_receive - post receive work request entries on a QP
+ * pvrdma_post_recv - post receive work request entries on a QP
* @ibqp: the QP
* @wr: the work request list to post
* @bad_wr: the first bad WR returned
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index fc412cbfd042..19176583dbde 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -125,7 +125,7 @@ int pvrdma_query_device(struct ib_device *ibdev,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+int pvrdma_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct pvrdma_dev *dev = to_vdev(ibdev);
@@ -183,7 +183,7 @@ int pvrdma_query_port(struct ib_device *ibdev, u8 port,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
+int pvrdma_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct pvrdma_dev *dev = to_vdev(ibdev);
@@ -205,7 +205,7 @@ int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int pvrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
int err = 0;
@@ -232,7 +232,7 @@ int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
}
enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
- u8 port)
+ u32 port)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -274,7 +274,7 @@ int pvrdma_modify_device(struct ib_device *ibdev, int mask,
*
* @return: 0 on success, otherwise negative errno
*/
-int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+int pvrdma_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct ib_port_attr attr;
@@ -516,7 +516,7 @@ int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct pvrdma_dev *dev = to_vdev(ibah->device);
struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
- u8 port_num = rdma_ah_get_port_num(ah_attr);
+ u32 port_num = rdma_ah_get_port_num(ah_attr);
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return -EINVAL;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 97ed8f952f6e..544b94d97c3a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -70,30 +70,6 @@ enum pvrdma_mtu {
PVRDMA_MTU_4096 = 5,
};
-static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
-{
- switch (mtu) {
- case PVRDMA_MTU_256: return 256;
- case PVRDMA_MTU_512: return 512;
- case PVRDMA_MTU_1024: return 1024;
- case PVRDMA_MTU_2048: return 2048;
- case PVRDMA_MTU_4096: return 4096;
- default: return -1;
- }
-}
-
-static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
-{
- switch (mtu) {
- case 256: return PVRDMA_MTU_256;
- case 512: return PVRDMA_MTU_512;
- case 1024: return PVRDMA_MTU_1024;
- case 2048: return PVRDMA_MTU_2048;
- case 4096:
- default: return PVRDMA_MTU_4096;
- }
-}
-
enum pvrdma_port_state {
PVRDMA_PORT_NOP = 0,
PVRDMA_PORT_DOWN = 1,
@@ -138,17 +114,6 @@ enum pvrdma_port_width {
PVRDMA_WIDTH_12X = 8,
};
-static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
-{
- switch (width) {
- case PVRDMA_WIDTH_1X: return 1;
- case PVRDMA_WIDTH_4X: return 4;
- case PVRDMA_WIDTH_8X: return 8;
- case PVRDMA_WIDTH_12X: return 12;
- default: return -1;
- }
-}
-
enum pvrdma_port_speed {
PVRDMA_SPEED_SDR = 1,
PVRDMA_SPEED_DDR = 2,
@@ -383,17 +348,17 @@ enum pvrdma_access_flags {
int pvrdma_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata);
-int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+int pvrdma_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
-int pvrdma_query_gid(struct ib_device *ibdev, u8 port,
+int pvrdma_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid);
-int pvrdma_query_pkey(struct ib_device *ibdev, u8 port,
+int pvrdma_query_pkey(struct ib_device *ibdev, u32 port,
u16 index, u16 *pkey);
enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
- u8 port);
+ u32 port);
int pvrdma_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props);
-int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
+int pvrdma_modify_port(struct ib_device *ibdev, u32 port,
int mask, struct ib_port_modify *props);
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
index fa5be13a4394..207bc0ed96ff 100644
--- a/drivers/infiniband/sw/rdmavt/mad.c
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -70,7 +70,7 @@
*
* Return: IB_MAD_RESULT_SUCCESS or error
*/
-int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
@@ -82,9 +82,6 @@ int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
* future may choose to implement this but it should not be made into a
* requirement.
*/
- if (ibport_num_to_idx(ibdev, port_num) < 0)
- return -EINVAL;
-
return IB_MAD_RESULT_FAILURE;
}
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
index a9d6eecc3723..1eae5efea4be 100644
--- a/drivers/infiniband/sw/rdmavt/mad.h
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -50,7 +50,7 @@
#include <rdma/rdma_vt.h>
-int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 8fd0128a9336..12ebe041a5da 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -151,15 +151,12 @@ static int rvt_modify_device(struct ib_device *device,
*
* Return: 0 on success
*/
-static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
+static int rvt_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *props)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct rvt_ibport *rvp;
- int port_index = ibport_num_to_idx(ibdev, port_num);
-
- if (port_index < 0)
- return -EINVAL;
+ u32 port_index = ibport_num_to_idx(ibdev, port_num);
rvp = rdi->ports[port_index];
/* props being zeroed by the caller, avoid zeroing it here */
@@ -186,16 +183,13 @@ static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
*
* Return: 0 on success
*/
-static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
+static int rvt_modify_port(struct ib_device *ibdev, u32 port_num,
int port_modify_mask, struct ib_port_modify *props)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct rvt_ibport *rvp;
int ret = 0;
- int port_index = ibport_num_to_idx(ibdev, port_num);
-
- if (port_index < 0)
- return -EINVAL;
+ u32 port_index = ibport_num_to_idx(ibdev, port_num);
rvp = rdi->ports[port_index];
if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
@@ -225,7 +219,7 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
*
* Return: 0 on failure pkey otherwise
*/
-static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
+static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index,
u16 *pkey)
{
/*
@@ -235,11 +229,9 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
* no way to protect against that anyway.
*/
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
- int port_index;
+ u32 port_index;
port_index = ibport_num_to_idx(ibdev, port_num);
- if (port_index < 0)
- return -EINVAL;
if (index >= rvt_get_npkeys(rdi))
return -EINVAL;
@@ -257,12 +249,12 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
*
* Return: 0 on success
*/
-static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
+static int rvt_query_gid(struct ib_device *ibdev, u32 port_num,
int guid_index, union ib_gid *gid)
{
struct rvt_dev_info *rdi;
struct rvt_ibport *rvp;
- int port_index;
+ u32 port_index;
/*
* Driver is responsible for updating the guid table. Which will be used
@@ -270,8 +262,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
* is being done.
*/
port_index = ibport_num_to_idx(ibdev, port_num);
- if (port_index < 0)
- return -EINVAL;
rdi = ib_to_rvt(ibdev);
rvp = rdi->ports[port_index];
@@ -301,16 +291,12 @@ static void rvt_dealloc_ucontext(struct ib_ucontext *context)
return;
}
-static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct ib_port_attr attr;
- int err, port_index;
-
- port_index = ibport_num_to_idx(ibdev, port_num);
- if (port_index < 0)
- return -EINVAL;
+ int err;
immutable->core_cap_flags = rdi->dparms.core_cap_flags;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index d19ff817c2c7..c0fed6510f0b 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -96,16 +96,9 @@
#define __rvt_pr_err_ratelimited(pdev, name, fmt, ...) \
dev_err_ratelimited(&(pdev)->dev, "%s: " fmt, name, ##__VA_ARGS__)
-static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num)
+static inline u32 ibport_num_to_idx(struct ib_device *ibdev, u32 port_num)
{
- struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
- int port_index;
-
- port_index = port_num - 1; /* IB ports start at 1 our arrays at 0 */
- if ((port_index < 0) || (port_index >= rdi->dparms.nports))
- return -EINVAL;
-
- return port_index;
+ return port_num - 1; /* IB ports start at 1, our arrays at 0 */
}
#endif /* DEF_RDMAVT_H */
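The removed per-callback bounds checks are safe to drop because the core validates the port number before dispatching into driver ops, along the lines of rdma_is_port_valid() in <rdma/ib_verbs.h>:

static inline bool rdma_is_port_valid(const struct ib_device *device,
				      unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}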
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index df0d173d6acb..da2e867a1ed9 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
type = RXE_NETWORK_TYPE_IPV4;
break;
case RDMA_NETWORK_IPV6:
- type = RXE_NETWORK_TYPE_IPV4;
+ type = RXE_NETWORK_TYPE_IPV6;
break;
default:
/* not reached - checked in rxe_av_chk_attr */
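A one-character copy-paste fix: the IPv6 arm assigned the IPv4 type. The corrected mapping in full (sketch; the switched-on value comes from the AH attribute's resolved GID):

switch (network_type) {			/* enum rdma_network_type */
case RDMA_NETWORK_IPV4:
	type = RXE_NETWORK_TYPE_IPV4;
	break;
case RDMA_NETWORK_IPV6:
	type = RXE_NETWORK_TYPE_IPV6;	/* previously pasted as ..._IPV4 */
	break;
default:
	/* not reached - checked in rxe_av_chk_attr */
	break;
}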
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 17a361b8dbb1..2af26737d32d 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -345,7 +345,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
- payload_size(pkt), to_mem_obj, NULL);
+ payload_size(pkt), to_mr_obj, NULL);
if (ret)
return COMPST_ERROR;
@@ -365,7 +365,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
- sizeof(u64), to_mem_obj, NULL);
+ sizeof(u64), to_mr_obj, NULL);
if (ret)
return COMPST_ERROR;
else
@@ -676,7 +676,6 @@ int rxe_completer(void *arg)
/* there is nothing to retry in this case */
if (!wqe || (wqe->state == wqe_state_posted)) {
- pr_warn("Retry attempted without a valid wqe\n");
ret = -EAGAIN;
goto done;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index ac9154f0593d..f469fd1c753d 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -26,7 +26,7 @@ static const char * const rxe_counter_name[] = {
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index)
+ u32 port, int index)
{
struct rxe_dev *dev = to_rdev(ibdev);
unsigned int cnt;
@@ -41,7 +41,7 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
}
struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num)
+ u32 port_num)
{
BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS);
/* We support only per port stats */
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 49ee6f96656d..2f369acb46d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -30,8 +30,8 @@ enum rxe_counters {
};
struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev,
- u8 port_num);
+ u32 port_num);
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
- u8 port, int index);
+ u32 port, int index);
#endif /* RXE_HW_COUNTERS_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0d758760b9ae..ef8061d2fbe0 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
enum copy_direction {
- to_mem_obj,
- from_mem_obj,
+ to_mr_obj,
+ from_mr_obj,
};
-void rxe_mem_init_dma(struct rxe_pd *pd,
- int access, struct rxe_mem *mem);
+void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
- u64 length, u64 iova, int access, struct ib_udata *udata,
- struct rxe_mem *mr);
+int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+ int access, struct ib_udata *udata, struct rxe_mr *mr);
-int rxe_mem_init_fast(struct rxe_pd *pd,
- int max_pages, struct rxe_mem *mem);
+int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
- int length, enum copy_direction dir, u32 *crcp);
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum copy_direction dir, u32 *crcp);
int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
+void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
enum lookup_type {
lookup_local,
lookup_remote,
};
-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
- enum lookup_type type);
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type);
-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
-void rxe_mem_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_entry *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
@@ -116,7 +113,6 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
-struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6e8c41567ba0..9f63947bab12 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
return key;
}
-int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- switch (mem->type) {
- case RXE_MEM_TYPE_DMA:
+ switch (mr->type) {
+ case RXE_MR_TYPE_DMA:
return 0;
- case RXE_MEM_TYPE_MR:
- if (iova < mem->iova ||
- length > mem->length ||
- iova > mem->iova + mem->length - length)
+ case RXE_MR_TYPE_MR:
+ if (iova < mr->iova || length > mr->length ||
+ iova > mr->iova + mr->length - length)
return -EFAULT;
return 0;
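mr_check_range() accepts [iova, iova + length) only when it lies inside [mr->iova, mr->iova + mr->length). The third comparison is arranged so the u64 arithmetic cannot wrap: the second test already guarantees length <= mr->length, so mr->length - length never underflows, whereas the naive iova + length > mr->iova + mr->length could overflow for a hostile iova:

/* wrap-safe containment check, as committed above */
if (iova < mr->iova || length > mr->length ||
    iova > mr->iova + mr->length - length)
	return -EFAULT;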
@@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
| IB_ACCESS_REMOTE_WRITE \
| IB_ACCESS_REMOTE_ATOMIC)
-static void rxe_mem_init(int access, struct rxe_mem *mem)
+static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mem->pelem.index << 8 | rxe_get_key();
+ u32 lkey = mr->pelem.index << 8 | rxe_get_key();
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
- mem->ibmr.lkey = lkey;
- mem->ibmr.rkey = rkey;
- mem->state = RXE_MEM_STATE_INVALID;
- mem->type = RXE_MEM_TYPE_NONE;
- mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+ mr->ibmr.lkey = lkey;
+ mr->ibmr.rkey = rkey;
+ mr->state = RXE_MR_STATE_INVALID;
+ mr->type = RXE_MR_TYPE_NONE;
+ mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-void rxe_mem_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
- struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
+ struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
int i;
- ib_umem_release(mem->umem);
+ ib_umem_release(mr->umem);
- if (mem->map) {
- for (i = 0; i < mem->num_map; i++)
- kfree(mem->map[i]);
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
- kfree(mem->map);
+ kfree(mr->map);
}
}
-static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
int num_map;
- struct rxe_map **map = mem->map;
+ struct rxe_map **map = mr->map;
num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
- mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
- if (!mem->map)
+ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mr->map)
goto err1;
for (i = 0; i < num_map; i++) {
- mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
- if (!mem->map[i])
+ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mr->map[i])
goto err2;
}
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
- mem->map_shift = ilog2(RXE_BUF_PER_MAP);
- mem->map_mask = RXE_BUF_PER_MAP - 1;
+ mr->map_shift = ilog2(RXE_BUF_PER_MAP);
+ mr->map_mask = RXE_BUF_PER_MAP - 1;
- mem->num_buf = num_buf;
- mem->num_map = num_map;
- mem->max_buf = num_map * RXE_BUF_PER_MAP;
+ mr->num_buf = num_buf;
+ mr->num_map = num_map;
+ mr->max_buf = num_map * RXE_BUF_PER_MAP;
return 0;
err2:
for (i--; i >= 0; i--)
- kfree(mem->map[i]);
+ kfree(mr->map[i]);
- kfree(mem->map);
+ kfree(mr->map);
err1:
return -ENOMEM;
}
-void rxe_mem_init_dma(struct rxe_pd *pd,
- int access, struct rxe_mem *mem)
+void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
{
- rxe_mem_init(access, mem);
+ rxe_mr_init(access, mr);
- mem->ibmr.pd = &pd->ibpd;
- mem->access = access;
- mem->state = RXE_MEM_STATE_VALID;
- mem->type = RXE_MEM_TYPE_DMA;
+ mr->ibmr.pd = &pd->ibpd;
+ mr->access = access;
+ mr->state = RXE_MR_STATE_VALID;
+ mr->type = RXE_MR_TYPE_DMA;
}
-int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
- u64 length, u64 iova, int access, struct ib_udata *udata,
- struct rxe_mem *mem)
+int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+ int access, struct ib_udata *udata, struct rxe_mr *mr)
{
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
@@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
goto err1;
}
- mem->umem = umem;
+ mr->umem = umem;
num_buf = ib_umem_num_pages(umem);
- rxe_mem_init(access, mem);
+ rxe_mr_init(access, mr);
- err = rxe_mem_alloc(mem, num_buf);
+ err = rxe_mr_alloc(mr, num_buf);
if (err) {
- pr_warn("err %d from rxe_mem_alloc\n", err);
+ pr_warn("err %d from rxe_mr_alloc\n", err);
ib_umem_release(umem);
goto err1;
}
- mem->page_shift = PAGE_SHIFT;
- mem->page_mask = PAGE_SIZE - 1;
+ mr->page_shift = PAGE_SHIFT;
+ mr->page_mask = PAGE_SIZE - 1;
num_buf = 0;
- map = mem->map;
+ map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
}
}
- mem->ibmr.pd = &pd->ibpd;
- mem->umem = umem;
- mem->access = access;
- mem->length = length;
- mem->iova = iova;
- mem->va = start;
- mem->offset = ib_umem_offset(umem);
- mem->state = RXE_MEM_STATE_VALID;
- mem->type = RXE_MEM_TYPE_MR;
+ mr->ibmr.pd = &pd->ibpd;
+ mr->umem = umem;
+ mr->access = access;
+ mr->length = length;
+ mr->iova = iova;
+ mr->va = start;
+ mr->offset = ib_umem_offset(umem);
+ mr->state = RXE_MR_STATE_VALID;
+ mr->type = RXE_MR_TYPE_MR;
return 0;
@@ -201,24 +198,23 @@ err1:
return err;
}
-int rxe_mem_init_fast(struct rxe_pd *pd,
- int max_pages, struct rxe_mem *mem)
+int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
{
int err;
- rxe_mem_init(0, mem);
+ rxe_mr_init(0, mr);
/* In fastreg, we also set the rkey */
- mem->ibmr.rkey = mem->ibmr.lkey;
+ mr->ibmr.rkey = mr->ibmr.lkey;
- err = rxe_mem_alloc(mem, max_pages);
+ err = rxe_mr_alloc(mr, max_pages);
if (err)
goto err1;
- mem->ibmr.pd = &pd->ibpd;
- mem->max_buf = max_pages;
- mem->state = RXE_MEM_STATE_FREE;
- mem->type = RXE_MEM_TYPE_MR;
+ mr->ibmr.pd = &pd->ibpd;
+ mr->max_buf = max_pages;
+ mr->state = RXE_MR_STATE_FREE;
+ mr->type = RXE_MR_TYPE_MR;
return 0;
@@ -226,28 +222,24 @@ err1:
return err;
}
-static void lookup_iova(
- struct rxe_mem *mem,
- u64 iova,
- int *m_out,
- int *n_out,
- size_t *offset_out)
+static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
+ size_t *offset_out)
{
- size_t offset = iova - mem->iova + mem->offset;
+ size_t offset = iova - mr->iova + mr->offset;
int map_index;
int buf_index;
u64 length;
- if (likely(mem->page_shift)) {
- *offset_out = offset & mem->page_mask;
- offset >>= mem->page_shift;
- *n_out = offset & mem->map_mask;
- *m_out = offset >> mem->map_shift;
+ if (likely(mr->page_shift)) {
+ *offset_out = offset & mr->page_mask;
+ offset >>= mr->page_shift;
+ *n_out = offset & mr->map_mask;
+ *m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- length = mem->map[map_index]->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -257,7 +249,7 @@ static void lookup_iova(
map_index++;
buf_index = 0;
}
- length = mem->map[map_index]->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
}
*m_out = map_index;
@@ -266,49 +258,49 @@ static void lookup_iova(
}
}
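
When mr->page_shift is set, lookup_iova() needs no loop at all: because both the page size and RXE_BUF_PER_MAP are powers of two, the byte offset splits into map index, buffer index and in-buffer offset with masks and shifts alone. A standalone sketch of the decomposition (constants illustrative; a 4 KiB-page build with a 16-byte struct rxe_phys_buf gives RXE_BUF_PER_MAP = 256, hence map_shift = 8):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long offset = 5ULL * 4096 * 256 + 7 * 4096 + 123;
        unsigned page_shift = 12;		/* PAGE_SHIFT, 4 KiB pages */
        unsigned long page_mask = 4096 - 1;
        unsigned map_shift = 8;			/* ilog2(256 bufs per map) */
        unsigned long map_mask = 256 - 1;

        size_t in_buf = offset & page_mask;		/* *offset_out */
        unsigned long long page = offset >> page_shift;
        unsigned n = page & map_mask;			/* *n_out */
        unsigned long long m = page >> map_shift;	/* *m_out */

        printf("m=%llu n=%u offset=%zu\n", m, n, in_buf); /* m=5 n=7 offset=123 */
        return 0;
    }
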
-void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
+void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
{
size_t offset;
int m, n;
void *addr;
- if (mem->state != RXE_MEM_STATE_VALID) {
- pr_warn("mem not in valid state\n");
+ if (mr->state != RXE_MR_STATE_VALID) {
+ pr_warn("mr not in valid state\n");
addr = NULL;
goto out;
}
- if (!mem->map) {
+ if (!mr->map) {
addr = (void *)(uintptr_t)iova;
goto out;
}
- if (mem_check_range(mem, iova, length)) {
+ if (mr_check_range(mr, iova, length)) {
pr_warn("range violation\n");
addr = NULL;
goto out;
}
- lookup_iova(mem, iova, &m, &n, &offset);
+ lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mem->map[m]->buf[n].size) {
+ if (offset + length > mr->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
out:
return addr;
}
/* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mem object starting at iova. Compute incremental value of
- * crc32 if crcp is not zero. caller must hold a reference to mem
+ * an mr object starting at iova. Compute incremental value of
+ * crc32 if crcp is not zero. caller must hold a reference to mr
*/
-int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
- enum copy_direction dir, u32 *crcp)
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum copy_direction dir, u32 *crcp)
{
int err;
int bytes;
@@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
if (length == 0)
return 0;
- if (mem->type == RXE_MEM_TYPE_DMA) {
+ if (mr->type == RXE_MR_TYPE_DMA) {
u8 *src, *dest;
- src = (dir == to_mem_obj) ?
- addr : ((void *)(uintptr_t)iova);
+ src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
- dest = (dir == to_mem_obj) ?
- ((void *)(uintptr_t)iova) : addr;
+ dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
memcpy(dest, src, length);
if (crcp)
- *crcp = rxe_crc32(to_rdev(mem->ibmr.device),
- *crcp, dest, length);
+ *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
+ length);
return 0;
}
- WARN_ON_ONCE(!mem->map);
+ WARN_ON_ONCE(!mr->map);
- err = mem_check_range(mem, iova, length);
+ err = mr_check_range(mr, iova, length);
if (err) {
err = -EFAULT;
goto err1;
}
- lookup_iova(mem, iova, &m, &i, &offset);
+ lookup_iova(mr, iova, &m, &i, &offset);
- map = mem->map + m;
+ map = mr->map + m;
buf = map[0]->buf + i;
while (length > 0) {
u8 *src, *dest;
va = (u8 *)(uintptr_t)buf->addr + offset;
- src = (dir == to_mem_obj) ? addr : va;
- dest = (dir == to_mem_obj) ? va : addr;
+ src = (dir == to_mr_obj) ? addr : va;
+ dest = (dir == to_mr_obj) ? va : addr;
bytes = buf->size - offset;
@@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
memcpy(dest, src, bytes);
if (crcp)
- crc = rxe_crc32(to_rdev(mem->ibmr.device),
- crc, dest, bytes);
+ crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
+ bytes);
length -= bytes;
addr += bytes;
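
The crcp handling in rxe_mr_copy() relies on CRC32 being resumable: the running value is fed back in for every buffer segment, so copying in pieces checksums the same as one pass over the whole range. A userspace sketch of that property, with zlib's crc32() standing in for rxe_crc32() (the rxe version goes through the kernel crypto API and uses a different seed, so this only illustrates the incremental shape; build with -lz):

    #include <assert.h>
    #include <zlib.h>

    int main(void)
    {
        unsigned char data[256];
        uLong whole, piecewise = crc32(0L, Z_NULL, 0);
        size_t i, chunk = 48;	/* arbitrary segment size */

        for (i = 0; i < sizeof(data); i++)
            data[i] = (unsigned char)i;

        whole = crc32(crc32(0L, Z_NULL, 0), data, sizeof(data));
        for (i = 0; i < sizeof(data); i += chunk) {
            size_t n = sizeof(data) - i < chunk ? sizeof(data) - i : chunk;
            piecewise = crc32(piecewise, data + i, n);	/* resume */
        }
        assert(whole == piecewise);
        return 0;
    }
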
@@ -411,7 +401,7 @@ int copy_data(
struct rxe_sge *sge = &dma->sge[dma->cur_sge];
int offset = dma->sge_offset;
int resid = dma->resid;
- struct rxe_mem *mem = NULL;
+ struct rxe_mr *mr = NULL;
u64 iova;
int err;
@@ -424,8 +414,8 @@ int copy_data(
}
if (sge->length && (offset < sge->length)) {
- mem = lookup_mem(pd, access, sge->lkey, lookup_local);
- if (!mem) {
+ mr = lookup_mr(pd, access, sge->lkey, lookup_local);
+ if (!mr) {
err = -EINVAL;
goto err1;
}
@@ -435,9 +425,9 @@ int copy_data(
bytes = length;
if (offset >= sge->length) {
- if (mem) {
- rxe_drop_ref(mem);
- mem = NULL;
+ if (mr) {
+ rxe_drop_ref(mr);
+ mr = NULL;
}
sge++;
dma->cur_sge++;
@@ -449,9 +439,9 @@ int copy_data(
}
if (sge->length) {
- mem = lookup_mem(pd, access, sge->lkey,
- lookup_local);
- if (!mem) {
+ mr = lookup_mr(pd, access, sge->lkey,
+ lookup_local);
+ if (!mr) {
err = -EINVAL;
goto err1;
}
@@ -466,7 +456,7 @@ int copy_data(
if (bytes > 0) {
iova = sge->addr + offset;
- err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
+ err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
if (err)
goto err2;
@@ -480,14 +470,14 @@ int copy_data(
dma->sge_offset = offset;
dma->resid = resid;
- if (mem)
- rxe_drop_ref(mem);
+ if (mr)
+ rxe_drop_ref(mr);
return 0;
err2:
- if (mem)
- rxe_drop_ref(mem);
+ if (mr)
+ rxe_drop_ref(mr);
err1:
return err;
}
@@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
return 0;
}
-/* (1) find the mem (mr or mw) corresponding to lkey/rkey
+/* (1) find the mr corresponding to lkey/rkey
* depending on lookup_type
- * (2) verify that the (qp) pd matches the mem pd
- * (3) verify that the mem can support the requested access
- * (4) verify that mem state is valid
+ * (2) verify that the (qp) pd matches the mr pd
+ * (3) verify that the mr can support the requested access
+ * (4) verify that mr state is valid
*/
-struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
- enum lookup_type type)
+struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type)
{
- struct rxe_mem *mem;
+ struct rxe_mr *mr;
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
int index = key >> 8;
- mem = rxe_pool_get_index(&rxe->mr_pool, index);
- if (!mem)
+ mr = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mr)
return NULL;
- if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
- (type == lookup_remote && mr_rkey(mem) != key) ||
- mr_pd(mem) != pd ||
- (access && !(access & mem->access)) ||
- mem->state != RXE_MEM_STATE_VALID)) {
- rxe_drop_ref(mem);
- mem = NULL;
+ if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
+ (type == lookup_remote && mr_rkey(mr) != key) ||
+ mr_pd(mr) != pd || (access && !(access & mr->access)) ||
+ mr->state != RXE_MR_STATE_VALID)) {
+ rxe_drop_ref(mr);
+ mr = NULL;
}
- return mem;
+ return mr;
}
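
lookup_mr() recovers the pool index by dropping the low eight bits of the key (index = key >> 8); the low byte is the portion of the lkey/rkey that varies between registrations. A toy sketch of the split (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t key   = 0x00012a07;	/* example lkey */
        uint32_t index = key >> 8;	/* pool index: 0x12a */
        uint8_t  keyb  = key & 0xff;	/* per-registration key byte */

        printf("index=0x%x key byte=0x%02x\n", index, keyb);
        return 0;
    }
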
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 307d8986e7c9..d24901f2af3f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -8,8 +8,6 @@
#include "rxe_loc.h"
/* info about object pools
- * note that mr and mw share a single index space
- * so that one can map an lkey to the correct type of object
*/
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
- .size = sizeof(struct rxe_mem),
- .elem_offset = offsetof(struct rxe_mem, pelem),
- .cleanup = rxe_mem_cleanup,
+ .size = sizeof(struct rxe_mr),
+ .elem_offset = offsetof(struct rxe_mr, pelem),
+ .cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MR_INDEX,
.min_index = RXE_MIN_MR_INDEX,
},
[RXE_TYPE_MW] = {
.name = "rxe-mw",
- .size = sizeof(struct rxe_mem),
- .elem_offset = offsetof(struct rxe_mem, pelem),
- .flags = RXE_POOL_INDEX,
+ .size = sizeof(struct rxe_mw),
+ .elem_offset = offsetof(struct rxe_mw, pelem),
+ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
},
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 889290793d75..3664cdae7e1f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
} else {
err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
- from_mem_obj,
+ from_mr_obj,
&crc);
if (err)
return err;
@@ -596,7 +596,7 @@ next_wqe:
if (wqe->mask & WR_REG_MASK) {
if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- struct rxe_mem *rmr;
+ struct rxe_mr *rmr;
rmr = rxe_pool_get_index(&rxe->mr_pool,
wqe->wr.ex.invalidate_rkey >> 8);
@@ -607,14 +607,14 @@ next_wqe:
wqe->status = IB_WC_MW_BIND_ERR;
goto exit;
}
- rmr->state = RXE_MEM_STATE_FREE;
+ rmr->state = RXE_MR_STATE_FREE;
rxe_drop_ref(rmr);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else if (wqe->wr.opcode == IB_WR_REG_MR) {
- struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
+ struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
- rmr->state = RXE_MEM_STATE_VALID;
+ rmr->state = RXE_MR_STATE_VALID;
rmr->access = wqe->wr.wr.reg.access;
rmr->ibmr.lkey = wqe->wr.wr.reg.key;
rmr->ibmr.rkey = wqe->wr.wr.reg.key;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 142f3d8014d8..2b220659bddb 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
- struct rxe_mem *mem = NULL;
+ struct rxe_mr *mr = NULL;
u64 va;
u32 rkey;
u32 resid;
@@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
resid = qp->resp.resid;
pktlen = payload_size(pkt);
- mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
- if (!mem) {
+ mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
+ if (!mr) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
- if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
+ if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
- if (mem_check_range(mem, va, resid)) {
+ if (mr_check_range(mr, va, resid)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
WARN_ON_ONCE(qp->resp.mr);
- qp->resp.mr = mem;
+ qp->resp.mr = mr;
return RESPST_EXECUTE;
err:
- if (mem)
- rxe_drop_ref(mem);
+ if (mr)
+ rxe_drop_ref(mr);
return state;
}
@@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int err;
err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
- data_addr, data_len, to_mem_obj, NULL);
+ data_addr, data_len, to_mr_obj, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
: RESPST_ERR_MALFORMED_WQE;
@@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
int err;
int data_len = payload_size(pkt);
- err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
- data_len, to_mem_obj, NULL);
+ err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
+ to_mr_obj, NULL);
if (err) {
rc = RESPST_ERR_RKEY_VIOLATION;
goto out;
@@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
u64 iova = atmeth_va(pkt);
u64 *vaddr;
enum resp_states ret;
- struct rxe_mem *mr = qp->resp.mr;
+ struct rxe_mr *mr = qp->resp.mr;
- if (mr->state != RXE_MEM_STATE_VALID) {
+ if (mr->state != RXE_MR_STATE_VALID) {
ret = RESPST_ERR_RKEY_VIOLATION;
goto out;
}
@@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
- payload, from_mem_obj, &icrc);
+ err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+ payload, from_mr_obj, &icrc);
if (err)
pr_err("Failed copying memory\n");
@@ -816,8 +816,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
struct rxe_recv_wqe *wqe = qp->resp.wqe;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- if (unlikely(!wqe))
- return RESPST_CLEANUP;
+ if (!wqe)
+ goto finish;
memset(&cqe, 0, sizeof(cqe));
@@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
if (pkt->mask & RXE_IETH_MASK) {
- struct rxe_mem *rmr;
+ struct rxe_mr *rmr;
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = ieth_rkey(pkt);
@@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
wc->ex.invalidate_rkey);
return RESPST_ERROR;
}
- rmr->state = RXE_MEM_STATE_FREE;
+ rmr->state = RXE_MR_STATE_FREE;
rxe_drop_ref(rmr);
}
@@ -917,12 +917,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
return RESPST_ERR_CQ_OVERFLOW;
- if (qp->resp.state == QP_STATE_ERROR)
+finish:
+ if (unlikely(qp->resp.state == QP_STATE_ERROR))
return RESPST_CHK_RESOURCE;
-
- if (!pkt)
+ if (unlikely(!pkt))
return RESPST_DONE;
- else if (qp_type(qp) == IB_QPT_RC)
+ if (qp_type(qp) == IB_QPT_RC)
return RESPST_ACKNOWLEDGE;
else
return RESPST_CLEANUP;
@@ -1056,10 +1056,8 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
- if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
- rc = RESPST_CLEANUP;
- goto out;
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+ return RESPST_CLEANUP;
} else if (pkt->mask & RXE_READ_MASK) {
struct resp_res *res;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index dee5e0e919d2..aeb5e232c195 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -26,7 +26,7 @@ static int rxe_query_device(struct ib_device *dev,
}
static int rxe_query_port(struct ib_device *dev,
- u8 port_num, struct ib_port_attr *attr)
+ u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port;
@@ -54,7 +54,7 @@ static int rxe_query_port(struct ib_device *dev,
}
static int rxe_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey)
+ u32 port_num, u16 index, u16 *pkey)
{
if (index > 0)
return -EINVAL;
@@ -84,7 +84,7 @@ static int rxe_modify_device(struct ib_device *dev,
}
static int rxe_modify_port(struct ib_device *dev,
- u8 port_num, int mask, struct ib_port_modify *attr)
+ u32 port_num, int mask, struct ib_port_modify *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port;
@@ -101,7 +101,7 @@ static int rxe_modify_port(struct ib_device *dev,
}
static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
- u8 port_num)
+ u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
@@ -121,7 +121,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
rxe_drop_ref(uc);
}
-static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
struct ib_port_immutable *immutable)
{
int err;
@@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr)
@@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_index(mr);
rxe_add_ref(pd);
- rxe_mem_init_dma(pd, access, mr);
+ rxe_mr_init_dma(pd, access, mr);
return &mr->ibmr;
}
@@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr) {
@@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mem_init_user(pd, start, length, iova,
- access, udata, mr);
+ err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
if (err)
goto err3;
@@ -916,9 +915,9 @@ err2:
static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
- mr->state = RXE_MEM_STATE_ZOMBIE;
+ mr->state = RXE_MR_STATE_ZOMBIE;
rxe_drop_ref(mr_pd(mr));
rxe_drop_index(mr);
rxe_drop_ref(mr);
@@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
int err;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
rxe_add_ref(pd);
- err = rxe_mem_init_fast(pd, max_num_sg, mr);
+ err = rxe_mr_init_fast(pd, max_num_sg, mr);
if (err)
goto err2;
@@ -962,7 +961,7 @@ err1:
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
struct rxe_map *map;
struct rxe_phys_buf *buf;
@@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
- struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_mr *mr = to_rmr(ibmr);
int n;
mr->nbuf = 0;
@@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+ INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 79e0a5a878da..11eba7a3ba8f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -156,7 +156,7 @@ struct resp_res {
struct sk_buff *skb;
} atomic;
struct {
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
u64 va_org;
u32 rkey;
u32 length;
@@ -183,7 +183,7 @@ struct rxe_resp_info {
/* RDMA read / atomic only */
u64 va;
- struct rxe_mem *mr;
+ struct rxe_mr *mr;
u32 resid;
u32 rkey;
u32 length;
@@ -262,18 +262,18 @@ struct rxe_qp {
struct execute_work cleanup_work;
};
-enum rxe_mem_state {
- RXE_MEM_STATE_ZOMBIE,
- RXE_MEM_STATE_INVALID,
- RXE_MEM_STATE_FREE,
- RXE_MEM_STATE_VALID,
+enum rxe_mr_state {
+ RXE_MR_STATE_ZOMBIE,
+ RXE_MR_STATE_INVALID,
+ RXE_MR_STATE_FREE,
+ RXE_MR_STATE_VALID,
};
-enum rxe_mem_type {
- RXE_MEM_TYPE_NONE,
- RXE_MEM_TYPE_DMA,
- RXE_MEM_TYPE_MR,
- RXE_MEM_TYPE_MW,
+enum rxe_mr_type {
+ RXE_MR_TYPE_NONE,
+ RXE_MR_TYPE_DMA,
+ RXE_MR_TYPE_MR,
+ RXE_MR_TYPE_MW,
};
#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
@@ -287,17 +287,14 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
-struct rxe_mem {
+struct rxe_mr {
struct rxe_pool_entry pelem;
- union {
- struct ib_mr ibmr;
- struct ib_mw ibmw;
- };
+ struct ib_mr ibmr;
struct ib_umem *umem;
- enum rxe_mem_state state;
- enum rxe_mem_type type;
+ enum rxe_mr_state state;
+ enum rxe_mr_type type;
u64 va;
u64 iova;
size_t length;
@@ -318,6 +315,17 @@ struct rxe_mem {
struct rxe_map **map;
};
+enum rxe_mw_state {
+ RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
+ RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
+ RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
+};
+
+struct rxe_mw {
+ struct ib_mw ibmw;
+ struct rxe_pool_entry pelem;
+};
+
struct rxe_mc_grp {
struct rxe_pool_entry pelem;
spinlock_t mcg_lock; /* guard group */
@@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}
-static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
+static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
- return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
+ return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}
-static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
+static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
- return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
+ return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
-static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
return to_rpd(mr->ibmr.pd);
}
-static inline u32 mr_lkey(struct rxe_mem *mr)
+static inline u32 mr_lkey(struct rxe_mr *mr)
{
return mr->ibmr.lkey;
}
-static inline u32 mr_rkey(struct rxe_mem *mr)
+static inline u32 mr_rkey(struct rxe_mr *mr)
{
return mr->ibmr.rkey;
}
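
With the old union gone, struct rxe_mr embeds only ib_mr and struct rxe_mw only ib_mw, so to_rmr() and to_rmw() each recover their own wrapper via container_of(). A self-contained sketch of that pointer arithmetic (struct layouts simplified):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_mr { int lkey; };
    struct rxe_mr { int pelem; struct ib_mr ibmr; };

    int main(void)
    {
        struct rxe_mr mr = { .ibmr = { .lkey = 42 } };
        struct ib_mr *ibmr = &mr.ibmr;
        /* what to_rmr() does: step back from the member to the wrapper */
        struct rxe_mr *back = container_of(ibmr, struct rxe_mr, ibmr);

        printf("%d\n", back->ibmr.lkey);	/* 42 */
        return 0;
    }
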
diff --git a/drivers/infiniband/sw/siw/iwarp.h b/drivers/infiniband/sw/siw/iwarp.h
index e8a04d9c89cb..3f1dedb50a0d 100644
--- a/drivers/infiniband/sw/siw/iwarp.h
+++ b/drivers/infiniband/sw/siw/iwarp.h
@@ -114,13 +114,6 @@ static inline u8 __ddp_get_version(struct iwarp_ctrl *ctrl)
return be16_to_cpu(ctrl->ddp_rdmap_ctrl & DDP_MASK_VERSION) >> 8;
}
-static inline void __ddp_set_version(struct iwarp_ctrl *ctrl, u8 version)
-{
- ctrl->ddp_rdmap_ctrl =
- (ctrl->ddp_rdmap_ctrl & ~DDP_MASK_VERSION) |
- (cpu_to_be16((u16)version << 8) & DDP_MASK_VERSION);
-}
-
static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
{
__be16 ver = ctrl->ddp_rdmap_ctrl & RDMAP_MASK_VERSION;
@@ -128,12 +121,6 @@ static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl)
return be16_to_cpu(ver) >> 6;
}
-static inline void __rdmap_set_version(struct iwarp_ctrl *ctrl, u8 version)
-{
- ctrl->ddp_rdmap_ctrl = (ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_VERSION) |
- (cpu_to_be16(version << 6) & RDMAP_MASK_VERSION);
-}
-
static inline u8 __rdmap_get_opcode(struct iwarp_ctrl *ctrl)
{
return be16_to_cpu(ctrl->ddp_rdmap_ctrl & RDMAP_MASK_OPCODE);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 1f9e15b71504..7a5ed86ffc9f 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1300,7 +1300,7 @@ static void siw_cm_llp_state_change(struct sock *sk)
}
static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
- struct sockaddr *raddr)
+ struct sockaddr *raddr, bool afonly)
{
int rv, flags = 0;
size_t size = laddr->sa_family == AF_INET ?
@@ -1311,6 +1311,12 @@ static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
*/
sock_set_reuseaddr(s->sk);
+ if (afonly) {
+ rv = ip6_sock_set_v6only(s->sk);
+ if (rv)
+ return rv;
+ }
+
rv = s->ops->bind(s, laddr, size);
if (rv < 0)
return rv;
@@ -1371,7 +1377,7 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
* mode. Might be reconsidered for async connection setup at
* TCP level.
*/
- rv = kernel_bindconnect(s, laddr, raddr);
+ rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
if (rv != 0) {
siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
goto error;
@@ -1786,6 +1792,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
} else {
struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+ if (id->afonly) {
+ rv = ip6_sock_set_v6only(s->sk);
+ if (rv) {
+ siw_dbg(id->device,
+ "ip6_sock_set_v6only erro: %d\n", rv);
+ goto error;
+ }
+ }
+
/* For wildcard addr, limit binding to current device only */
if (ipv6_addr_any(&laddr->sin6_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 34a910cf0edb..61c17db70d65 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
mem->perms = rights & IWARP_ACCESS_MASK;
kref_init(&mem->ref);
- mr->mem = mem;
-
get_random_bytes(&next, 4);
next &= 0x00ffffff;
@@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
kfree(mem);
return -ENOMEM;
}
+
+ mr->mem = mem;
/* Set the STag index part */
mem->stag = id << 8;
mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index db138c8423da..f911287576d1 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -29,11 +29,6 @@ static inline void siw_mem_put(struct siw_mem *mem)
kref_put(&mem->ref, siw_free_mem);
}
-static inline struct siw_mr *siw_mem2mr(struct siw_mem *m)
-{
- return container_of(m, struct siw_mr, mem);
-}
-
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{
while (num_sge) {
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e389d44e5591..d2313efb26db 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -160,7 +160,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
return 0;
}
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr)
{
struct siw_device *sdev = to_siw_dev(base_dev);
@@ -194,7 +194,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
return rv;
}
-int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
struct ib_port_immutable *port_immutable)
{
struct ib_port_attr attr;
@@ -209,7 +209,7 @@ int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
return 0;
}
-int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid)
{
struct siw_device *sdev = to_siw_dev(base_dev);
@@ -1848,7 +1848,7 @@ void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
}
}
-void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype)
+void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
{
struct ib_event event;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 637454529357..67ac08886a70 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -36,17 +36,17 @@ static inline void siw_copy_sgl(struct ib_sge *sge, struct siw_sge *siw_sge,
int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata);
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx);
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr);
-int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
+int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
struct ib_port_immutable *port_immutable);
int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
struct ib_udata *udata);
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-int siw_query_port(struct ib_device *base_dev, u8 port,
+int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr);
-int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
+int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
@@ -86,6 +86,6 @@ void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
-void siw_port_event(struct siw_device *dev, u8 port, enum ib_event_type type);
+void siw_port_event(struct siw_device *dev, u32 port, enum ib_event_type type);
#endif
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 179ff1d068e5..75cd44789661 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -501,9 +501,9 @@ void ipoib_reap_ah(struct work_struct *work);
struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
-struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *format);
-int ipoib_intf_init(struct ib_device *hca, u8 port, const char *format,
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
struct net_device *dev);
void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
@@ -677,8 +677,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
#else
-struct ipoib_cm_tx;
-
#define ipoib_max_conn_qp 0
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index d5d592bdab35..9dbc85a6b702 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1122,12 +1122,8 @@ static int ipoib_cm_modify_tx_init(struct net_device *dev,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
- ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
- if (ret) {
- ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
- return ret;
- }
+ qp_attr.pkey_index = priv->pkey_index;
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr.port_num = priv->port;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 494f413dc3c6..ceabfb0b0a83 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1060,7 +1060,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
union ib_gid *netdev_gid;
int err;
u16 index;
- u8 port;
+ u32 port;
bool ret = false;
netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e16b40c09f82..bbb18087fdab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -90,7 +90,7 @@ static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
- struct ib_device *dev, u8 port, u16 pkey,
+ struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
@@ -164,8 +164,13 @@ int ipoib_open(struct net_device *dev)
dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
}
up_read(&priv->vlan_rwsem);
- }
+ } else if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
+ ipoib_dbg(priv, "parent device %s is not up, so child device may not be functioning.\n",
+ ppriv->dev->name);
+ }
netif_start_queue(dev);
return 0;
@@ -438,7 +443,7 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
/* Returns the number of matching net_devs found (between 0 and 2). Also
* return the matching net_device in the @net_dev parameter, holding a
* reference to the net_device, if the number of matches >= 1 */
-static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
+static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
u16 pkey_index,
const union ib_gid *gid,
const struct sockaddr *addr,
@@ -463,7 +468,7 @@ static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
}
static struct net_device *ipoib_get_net_dev_by_params(
- struct ib_device *dev, u8 port, u16 pkey,
+ struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data)
{
@@ -1181,7 +1186,12 @@ unref:
static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ if (rn->tx_timeout) {
+ rn->tx_timeout(dev, txqueue);
+ return;
+ }
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
ipoib_warn(priv,
@@ -2145,7 +2155,7 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
const char *name)
{
struct net_device *dev;
@@ -2162,7 +2172,7 @@ static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
return dev;
}
-int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
struct net_device *dev)
{
struct rdma_netdev *rn = netdev_priv(dev);
@@ -2213,7 +2223,7 @@ out:
return rc;
}
-struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *name)
{
struct net_device *dev;
@@ -2456,7 +2466,7 @@ static int ipoib_intercept_dev_id_attr(struct net_device *dev)
}
static struct net_device *ipoib_add_port(const char *format,
- struct ib_device *hca, u8 port)
+ struct ib_device *hca, u32 port)
{
struct rtnl_link_ops *ops = ipoib_get_link_ops();
struct rdma_netdev_alloc_params params;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 78ee9445f801..9f6ac0a09a78 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -297,7 +297,6 @@ struct iser_login_desc {
struct iser_conn;
struct ib_conn;
-struct iscsi_iser_task;
/**
* struct iser_device - iSER device handle
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 7305ed8976c2..18266f07c58d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_init_conn(isert_conn);
isert_conn->cm_id = cma_id;
- ret = isert_alloc_login_buf(isert_conn, cma_id->device);
- if (ret)
- goto out;
-
device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
- goto out_rsp_dma_map;
+ goto out;
}
isert_conn->device = device;
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+ if (ret)
+ goto out_conn_dev;
+
isert_set_nego_params(isert_conn, &event->param.conn);
isert_conn->qp = isert_create_qp(isert_conn, cma_id);
if (IS_ERR(isert_conn->qp)) {
ret = PTR_ERR(isert_conn->qp);
- goto out_conn_dev;
+ goto out_rsp_dma_map;
}
ret = isert_login_post_recv(isert_conn);
@@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
out_destroy_qp:
isert_destroy_qp(isert_conn);
-out_conn_dev:
- isert_device_put(device);
out_rsp_dma_map:
isert_free_login_buf(isert_conn);
+out_conn_dev:
+ isert_device_put(device);
out:
kfree(isert_conn);
rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
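
The reordered labels above restore the usual goto-unwind invariant: cleanup runs in the reverse order of acquisition, and each label undoes only what had already succeeded (the device reference is now taken before the login buffer, so it is dropped after the buffer is freed). A generic sketch of the idiom (names hypothetical):

    #include <stdlib.h>

    struct res { int dummy; };
    static struct res *acquire(void) { return malloc(sizeof(struct res)); }
    static void release(struct res *r) { free(r); }

    static int setup(struct res *out[3])
    {
        struct res *a, *b, *c;

        a = acquire();		/* e.g. the device reference */
        if (!a)
            return -1;
        b = acquire();		/* e.g. the login buffer */
        if (!b)
            goto err_a;
        c = acquire();		/* e.g. the QP */
        if (!c)
            goto err_b;
        out[0] = a; out[1] = b; out[2] = c;
        return 0;
    err_b:
        release(b);		/* reverse order of acquisition */
    err_a:
        release(a);
        return -1;
    }

    int main(void)
    {
        struct res *r[3];

        if (setup(r))
            return 1;
        release(r[2]); release(r[1]); release(r[0]);
        return 0;
    }
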
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index b6a0abf40589..7d53d18a5004 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -101,6 +101,9 @@ static ssize_t mpath_policy_show(struct device *dev,
case MP_POLICY_MIN_INFLIGHT:
return sysfs_emit(page, "min-inflight (MI: %d)\n",
clt->mp_policy);
+ case MP_POLICY_MIN_LATENCY:
+ return sysfs_emit(page, "min-latency (ML: %d)\n",
+ clt->mp_policy);
default:
return sysfs_emit(page, "Unknown (%d)\n", clt->mp_policy);
}
@@ -114,22 +117,32 @@ static ssize_t mpath_policy_store(struct device *dev,
struct rtrs_clt *clt;
int value;
int ret;
+ size_t len = 0;
clt = container_of(dev, struct rtrs_clt, dev);
ret = kstrtoint(buf, 10, &value);
if (!ret && (value == MP_POLICY_RR ||
- value == MP_POLICY_MIN_INFLIGHT)) {
+ value == MP_POLICY_MIN_INFLIGHT ||
+ value == MP_POLICY_MIN_LATENCY)) {
clt->mp_policy = value;
return count;
}
+ /* use the token length to distinguish "mi" from "min-latency" */
+ len = strnlen(buf, NAME_MAX);
+ if (buf[len - 1] == '\n')
+ len--;
+
if (!strncasecmp(buf, "round-robin", 11) ||
- !strncasecmp(buf, "rr", 2))
+ (len == 2 && !strncasecmp(buf, "rr", 2)))
clt->mp_policy = MP_POLICY_RR;
else if (!strncasecmp(buf, "min-inflight", 12) ||
- !strncasecmp(buf, "mi", 2))
+ (len == 2 && !strncasecmp(buf, "mi", 2)))
clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else if (!strncasecmp(buf, "min-latency", 11) ||
+ (len == 2 && !strncasecmp(buf, "ml", 2)))
+ clt->mp_policy = MP_POLICY_MIN_LATENCY;
else
return -EINVAL;
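
The length check matters because "mi" is a prefix of "min-latency": a bare strncasecmp(buf, "mi", 2) would also accept the longer token. A standalone sketch of the exact-token test, stripping the trailing newline the way sysfs input carries it:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* 1 only when buf (possibly '\n'-terminated) is exactly tok */
    static int token_eq(const char *buf, const char *tok)
    {
        size_t len = strnlen(buf, 255);

        if (len && buf[len - 1] == '\n')
            len--;
        return len == strlen(tok) && !strncasecmp(buf, tok, len);
    }

    int main(void)
    {
        printf("%d %d\n", token_eq("mi\n", "mi"),	/* 1 */
               token_eq("min-latency\n", "mi"));	/* 0 */
        return 0;
    }
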
@@ -342,6 +355,21 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
static struct kobj_attribute rtrs_clt_hca_name_attr =
__ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ return sysfs_emit(page, "%lld ns\n",
+ ktime_to_ns(sess->s.hb_cur_latency));
+}
+
+static struct kobj_attribute rtrs_clt_cur_latency_attr =
+ __ATTR(cur_latency, 0444, rtrs_clt_cur_latency_show, NULL);
+
static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
@@ -385,6 +413,7 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
&rtrs_clt_reconnect_attr.attr,
&rtrs_clt_disconnect_attr.attr,
&rtrs_clt_remove_path_attr.attr,
+ &rtrs_clt_cur_latency_attr.attr,
NULL,
};
@@ -396,14 +425,13 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
{
struct rtrs_clt *clt = sess->clt;
char str[NAME_MAX];
- int err, cnt;
-
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
- str, sizeof(str));
- cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
- sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
- str + cnt, sizeof(str) - cnt);
+ int err;
+ struct rtrs_addr path = {
+ .src = &sess->s.src_addr,
+ .dst = &sess->s.dst_addr,
+ };
+ rtrs_addr_to_str(&path, str, sizeof(str));
err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
"%s", str);
if (err) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index b74a872387c4..40f4383764de 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -325,7 +325,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
@@ -345,7 +345,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_io_req *req =
container_of(wc->wr_cqe, typeof(*req), inv_cqe);
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
@@ -437,6 +437,13 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->in_use = false;
req->con = NULL;
+ if (errno) {
+ rtrs_err_rl(con->c.sess,
+ "IO request failed: error=%d path=%s [%s:%u]\n",
+ errno, kobject_name(&sess->kobj), sess->hca_name,
+ sess->hca_port);
+ }
+
if (notify)
req->conf(req->priv, errno);
}
@@ -586,7 +593,7 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
u32 imm_type, imm_payload;
bool w_inval = false;
@@ -628,6 +635,8 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
sess->s.hb_missed_cnt = 0;
+ sess->s.hb_cur_latency =
+ ktime_sub(ktime_get(), sess->s.hb_last_sent);
if (sess->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
@@ -826,6 +835,57 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
return min_path;
}
+/**
+ * get_next_path_min_latency() - Returns path with minimal latency.
+ * @it: the path pointer
+ *
+ * Return: a path with the lowest latency or NULL if all paths are tried
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ *
+ * Related to @MP_POLICY_MIN_LATENCY
+ *
+ * This DOES skip an already-tried path.
+ * There is a skip-list to skip a path if the path has been tried but failed.
+ * It will try the minimum latency path and then the second minimum latency
+ * path and so on. Finally it will return NULL once all paths have been tried.
+ * Therefore the caller MUST check whether the returned path is NULL and
+ * trigger the IO error if it is.
+ */
+static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
+{
+ struct rtrs_clt_sess *min_path = NULL;
+ struct rtrs_clt *clt = it->clt;
+ struct rtrs_clt_sess *sess;
+ ktime_t min_latency = KTIME_MAX;
+ ktime_t latency;
+
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
+ if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ continue;
+
+ latency = sess->s.hb_cur_latency;
+
+ if (latency < min_latency) {
+ min_latency = latency;
+ min_path = sess;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
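
Stripped of the RCU list walk, the policy reduces to a minimum scan over connected, not-yet-tried paths, marking the winner so a retry falls through to the next best. A plain-C sketch of that shape (types and fields hypothetical):

    #include <stdio.h>

    struct path { long long latency_ns; int connected; int tried; };

    static struct path *pick_min_latency(struct path *p, int n)
    {
        struct path *best = NULL;
        long long best_lat = (1LL << 62);	/* effectively +inf */
        int i;

        for (i = 0; i < n; i++) {
            if (!p[i].connected || p[i].tried)
                continue;			/* the skip-list role */
            if (p[i].latency_ns < best_lat) {
                best_lat = p[i].latency_ns;
                best = &p[i];
            }
        }
        if (best)
            best->tried = 1;	/* so a retry picks the next best */
        return best;
    }

    int main(void)
    {
        struct path p[3] = { {900, 1, 0}, {300, 1, 0}, {200, 1, 1} };

        printf("%lld\n", pick_min_latency(p, 3)->latency_ns);	/* 300 */
        return 0;
    }
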
static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
{
INIT_LIST_HEAD(&it->skip_list);
@@ -834,8 +894,10 @@ static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
if (clt->mp_policy == MP_POLICY_RR)
it->next_path = get_next_path_rr;
- else
+ else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
it->next_path = get_next_path_min_inflight;
+ else
+ it->next_path = get_next_path_min_latency;
}
static inline void path_it_deinit(struct path_it *it)
@@ -1020,7 +1082,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->usr_len + sizeof(*msg),
imm);
if (unlikely(ret)) {
- rtrs_err(s, "Write request failed: %d\n", ret);
+ rtrs_err_rl(s,
+ "Write request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&sess->kobj), sess->hca_name,
+ sess->hca_port);
if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
if (req->sg_cnt)
@@ -1052,7 +1117,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
struct rtrs_sess *s = con->c.sess;
struct rtrs_clt_sess *sess = to_clt_sess(s);
struct rtrs_msg_rdma_read *msg;
- struct rtrs_ib_dev *dev;
+ struct rtrs_ib_dev *dev = sess->s.dev;
struct ib_reg_wr rwr;
struct ib_send_wr *wr = NULL;
@@ -1062,9 +1127,6 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- s = &sess->s;
- dev = sess->s.dev;
-
if (unlikely(tsize > sess->chunk_size)) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
@@ -1141,7 +1203,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
req->data_len, imm, wr);
if (unlikely(ret)) {
- rtrs_err(s, "Read request failed: %d\n", ret);
+ rtrs_err_rl(s,
+ "Read request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&sess->kobj), sess->hca_name,
+ sess->hca_port);
if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
req->need_inv = false;
@@ -1863,12 +1928,14 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- rtrs_wrn(s, "CM error event %d\n", ev->event);
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
- rtrs_wrn(s, "CM error event %d\n", ev->event);
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
cm_err = -EHOSTUNREACH;
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
@@ -1878,7 +1945,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
rtrs_clt_close_conns(sess, false);
return 0;
default:
- rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+ rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
}
@@ -2251,7 +2319,7 @@ destroy:
static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
struct rtrs_iu *iu;
@@ -2333,7 +2401,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
struct rtrs_msg_info_rsp *msg;
enum rtrs_clt_state state;
@@ -2464,16 +2532,28 @@ out:
static int init_sess(struct rtrs_clt_sess *sess)
{
int err;
+ char str[NAME_MAX];
+ struct rtrs_addr path = {
+ .src = &sess->s.src_addr,
+ .dst = &sess->s.dst_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
mutex_lock(&sess->init_mutex);
err = init_conns(sess);
if (err) {
- rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+ rtrs_err(sess->clt,
+ "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
+ str, sess->hca_name, sess->hca_port);
goto out;
}
err = rtrs_send_sess_info(sess);
if (err) {
- rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+ rtrs_err(
+ sess->clt,
+ "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, sess->hca_name, sess->hca_port);
goto out;
}
rtrs_clt_sess_up(sess);
@@ -2791,8 +2871,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
} while (!changed && old_state != RTRS_CLT_DEAD);
if (likely(changed)) {
- rtrs_clt_destroy_sess_files(sess, sysfs_self);
rtrs_clt_remove_path_from_arr(sess);
+ rtrs_clt_destroy_sess_files(sess, sysfs_self);
kobject_put(&sess->kobj);
}
@@ -2933,9 +3013,9 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
return -ECOMM;
attr->queue_depth = clt->queue_depth;
- attr->max_io_size = clt->max_io_size;
- attr->sess_kobj = &clt->dev.kobj;
- strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+ /* Cap max_io_size to the minimum of the remote buffer size and the FR pages */
+ attr->max_io_size = min_t(int, clt->max_io_size,
+ clt->max_segments * SZ_4K);
return 0;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 98ba5d0a48b8..4c52f30e4da1 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -29,6 +29,7 @@ enum rtrs_clt_state {
enum rtrs_mp_policy {
MP_POLICY_RR,
MP_POLICY_MIN_INFLIGHT,
+ MP_POLICY_MIN_LATENCY,
};
/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 00eb45053339..86e65cf30cab 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -91,6 +91,7 @@ struct rtrs_con {
struct ib_cq *cq;
struct rdma_cm_id *cm_id;
unsigned int cid;
+ u16 cq_size;
};
struct rtrs_sess {
@@ -112,6 +113,8 @@ struct rtrs_sess {
unsigned int hb_interval_ms;
unsigned int hb_missed_cnt;
unsigned int hb_missed_max;
+ ktime_t hb_last_sent;
+ ktime_t hb_cur_latency;
};
/* rtrs information unit */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 126a96e75c62..a9288175fbb5 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -176,7 +176,8 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
err = device_add(&srv->dev);
if (err) {
pr_err("device_add(): %d\n", err);
- goto put;
+ put_device(&srv->dev);
+ goto unlock;
}
srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
if (!srv->kobj_paths) {
@@ -188,10 +189,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
}
dev_set_uevent_suppress(&srv->dev, false);
kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
- goto unlock;
-
-put:
- put_device(&srv->dev);
unlock:
mutex_unlock(&srv->paths_mutex);
@@ -262,14 +259,13 @@ int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
struct rtrs_srv *srv = sess->srv;
struct rtrs_sess *s = &sess->s;
char str[NAME_MAX];
- int err, cnt;
-
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
- str, sizeof(str));
- cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
- sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
- str + cnt, sizeof(str) - cnt);
+ int err;
+ struct rtrs_addr path = {
+ .src = &sess->s.dst_addr,
+ .dst = &sess->s.src_addr,
+ };
+ rtrs_addr_to_str(&path, str, sizeof(str));
err = rtrs_srv_create_once_sysfs_root_folders(sess);
if (err)
return err;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index f7aa2a7e7442..0fa116cabc44 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -199,7 +199,7 @@ static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
@@ -518,8 +518,9 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
rtrs_err_rl(s,
- "Sending I/O response failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ "Sending I/O response failed, session %s is disconnected, sess state %s\n",
+ kobject_name(&sess->kobj),
+ rtrs_srv_state_str(sess->state));
goto out;
}
if (always_invalidate) {
@@ -529,7 +530,9 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
}
if (unlikely(atomic_sub_return(1,
&con->sq_wr_avail) < 0)) {
- pr_err("IB send queue full\n");
+ rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
+ kobject_name(&sess->kobj),
+ con->c.cid);
atomic_add(1, &con->sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
@@ -543,7 +546,8 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
err = rdma_write_sg(id);
if (unlikely(err)) {
- rtrs_err_rl(s, "IO response failed: %d\n", err);
+ rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
+ kobject_name(&sess->kobj));
close_sess(sess);
}
out:
@@ -720,7 +724,7 @@ static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_iu *iu;
@@ -862,7 +866,7 @@ rwr_free:
static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_msg_info_req *msg;
@@ -1110,7 +1114,7 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_mr *mr =
container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_srv *srv = sess->srv;
@@ -1167,7 +1171,7 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
struct rtrs_srv *srv = sess->srv;
@@ -1683,6 +1687,8 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
{
struct rtrs_srv_sess *sess;
int err = -ENOMEM;
+ char str[NAME_MAX];
+ struct rtrs_addr path;
if (srv->paths_num >= MAX_PATHS_NUM) {
err = -ECONNRESET;
@@ -1717,6 +1723,13 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
sess->cur_cq_vector = -1;
sess->s.dst_addr = cm_id->route.addr.dst_addr;
sess->s.src_addr = cm_id->route.addr.src_addr;
+
+ /* temporary until the session name is received from the client */
+ path.src = &sess->s.src_addr;
+ path.dst = &sess->s.dst_addr;
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ strlcpy(sess->s.sessname, str, sizeof(sess->s.sessname));
+
sess->s.con_num = con_num;
sess->s.recon_cnt = recon_cnt;
uuid_copy(&sess->s.uuid, uuid);
@@ -1908,13 +1921,10 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_UNREACHABLE:
rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
- close_sess(sess);
- break;
+ fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- close_sess(sess);
- break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
close_sess(sess);
break;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index d13aff0aa816..a7847282a2eb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -218,14 +218,14 @@ static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
struct rdma_cm_id *cm_id = con->cm_id;
struct ib_cq *cq;
- cq = ib_alloc_cq(cm_id->device, con, cq_size,
- cq_vector, poll_ctx);
+ cq = ib_cq_pool_get(cm_id->device, cq_size, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
con->cq = cq;
+ con->cq_size = cq_size;
return 0;
}
@@ -273,7 +273,7 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
- ib_free_cq(con->cq);
+ ib_cq_pool_put(con->cq, con->cq_size);
con->cq = NULL;
return err;
}
@@ -290,7 +290,7 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
con->qp = NULL;
}
if (con->cq) {
- ib_free_cq(con->cq);
+ ib_cq_pool_put(con->cq, con->cq_size);
con->cq = NULL;
}
}
@@ -337,6 +337,9 @@ static void hb_work(struct work_struct *work)
schedule_hb(sess);
return;
}
+
+ sess->hb_last_sent = ktime_get();
+
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
0, NULL);
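
Together with the hb_cur_latency computation added to rtrs_clt_rdma_done() earlier in this series, this gives a heartbeat round-trip time: timestamp before the send, subtract on the HB ack. The same bookkeeping in miniature, with clock_gettime(CLOCK_MONOTONIC) playing the role of ktime_get():

    #include <stdio.h>
    #include <time.h>

    static long long now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main(void)
    {
        long long sent = now_ns();		/* hb_last_sent */
        struct timespec nap = { 0, 2000000 };	/* pretend 2 ms round trip */

        nanosleep(&nap, NULL);
        /* hb_cur_latency = now - hb_last_sent */
        printf("latency = %lld ns\n", now_ns() - sent);
        return 0;
    }
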
@@ -464,6 +467,30 @@ int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
EXPORT_SYMBOL(sockaddr_to_str);
/**
+ * rtrs_addr_to_str() - convert rtrs_addr to a string "src@dst"
+ * @addr: the rtrs_addr structure to be converted
+ * @buf: string containing source and destination addr of a path
+ * separated by '@', e.g. "ip:1.1.1.1@ip:1.1.1.2".
+ * @len: string length
+ *
+ * The return value is the number of characters written into buf not
+ * including the trailing '\0'.
+ */
+int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len)
+{
+ int cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)addr->src,
+ buf, len);
+ cnt += scnprintf(buf + cnt, len - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)addr->dst,
+ buf + cnt, len - cnt);
+ return cnt;
+}
+EXPORT_SYMBOL(rtrs_addr_to_str);
+
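
rtrs_addr_to_str() is a thin wrapper: convert the source address, append '@', convert the destination, and report the character count. A userspace sketch of the same formatting contract, with plain strings standing in for sockaddr_to_str():

    #include <stdio.h>

    /* returns characters written, excluding the trailing '\0' */
    static int addr_to_str(const char *src, const char *dst,
                           char *buf, size_t len)
    {
        return snprintf(buf, len, "%s@%s", src, dst);
    }

    int main(void)
    {
        char buf[64];

        addr_to_str("ip:1.1.1.1", "ip:1.1.1.2", buf, sizeof(buf));
        puts(buf);		/* ip:1.1.1.1@ip:1.1.1.2 */
        return 0;
    }
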
+/**
* rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
* to sockaddreses
* @str: string containing source and destination addr of a path
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index bebaa94c4728..dc3e1af1a85b 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -110,8 +110,6 @@ int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
struct rtrs_attrs {
u32 queue_depth;
u32 max_io_size;
- u8 sessname[NAME_MAX];
- struct kobject *sess_kobj;
};
int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
@@ -185,4 +183,5 @@ int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
struct rtrs_addr *addr);
int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len);
#endif
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 51c386a215f5..ea447805d4ea 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2382,6 +2382,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
dev_name(&sdev->device->dev), port_num);
mutex_unlock(&sport->mutex);
+ ret = -EINVAL;
goto reject;
}
@@ -3109,7 +3110,8 @@ static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
- int i, ret;
+ int ret;
+ u32 i;
pr_debug("device = %p\n", device);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 97381e2e03ba..2d0bc029619f 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1885,8 +1885,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
};
unsigned int idx = 0;
- smbus_board.properties = i2c_props;
-
i2c_props[idx++] = PROPERTY_ENTRY_U32("touchscreen-size-x",
info->x_max + 1);
i2c_props[idx++] = PROPERTY_ENTRY_U32("touchscreen-size-y",
@@ -1918,6 +1916,10 @@ static int elantech_create_smbus(struct psmouse *psmouse,
if (elantech_is_buttonpad(info))
i2c_props[idx++] = PROPERTY_ENTRY_BOOL("elan,clickpad");
+ smbus_board.fwnode = fwnode_create_software_node(i2c_props, NULL);
+ if (IS_ERR(smbus_board.fwnode))
+ return PTR_ERR(smbus_board.fwnode);
+
return psmouse_smbus_init(psmouse, &smbus_board, NULL, 0, false,
leave_breadcrumbs);
}
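The pattern here replaces a raw properties pointer with a software node: fwnode_create_software_node() copies the property_entry array into a refcounted fwnode, so the board info no longer dangles once the on-stack i2c_props array goes out of scope. A hedged sketch of the idiom, including the release side a full driver would also need:

	smbus_board.fwnode = fwnode_create_software_node(i2c_props, NULL);
	if (IS_ERR(smbus_board.fwnode))
		return PTR_ERR(smbus_board.fwnode);
	/* ... */
	fwnode_remove_software_node(smbus_board.fwnode);	/* on teardown */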
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 192ef8f61310..1f111b399bca 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -349,7 +349,7 @@ config S390_AP_IOMMU
is not implemented as it is not necessary for VFIO.
config MTK_IOMMU
- bool "MTK IOMMU Support"
+ tristate "MediaTek IOMMU Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
@@ -364,7 +364,7 @@ config MTK_IOMMU
If unsure, say N here.
config MTK_IOMMU_V1
- bool "MTK IOMMU Version 1 (M4U gen1) Support"
+ tristate "MediaTek IOMMU Version 1 (M4U gen1) Support"
depends on ARM
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
@@ -408,4 +408,16 @@ config VIRTIO_IOMMU
Say Y here if you intend to run this kernel as a guest.
+config SPRD_IOMMU
+ tristate "Unisoc IOMMU Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ select IOMMU_API
+ help
+	  Support for the IOMMU on Unisoc SoCs. This IOMMU can be used by
+	  Unisoc multimedia devices such as the display, the image codec
+	  (JPEG) and a few signal processors, including the VSP (video),
+	  GSP (graphic), ISP (image) and CPP (camera pixel processor).
+
+ Say Y here if you want to use the multimedia devices listed above.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 61bd30cd8369..c0fb0ba88143 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,4 +27,5 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o
+obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o
+obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 026ce7f8d993..55dd38d814d9 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,7 +11,6 @@
#include "amd_iommu_types.h"
-extern int amd_iommu_get_num_iommus(void);
extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
@@ -65,7 +64,6 @@ extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
-extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
#ifdef CONFIG_IRQ_REMAP
extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 6937e3674a16..94c1a7a9876d 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -693,7 +693,6 @@ struct iommu_dev_data {
} ats; /* ATS state */
bool pri_tlp; /* PASID TLB required for
PPR completions */
- u32 errata; /* Bitmap for errata to apply */
bool use_vapic; /* Enable device to use vapic mode */
bool defer_attach;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 321f5906e6ed..d006724f4dc2 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@@ -208,7 +207,6 @@ u16 *amd_iommu_alias_table;
* for a specific device. It is also indexed by the PCI device id.
*/
struct amd_iommu **amd_iommu_rlookup_table;
-EXPORT_SYMBOL(amd_iommu_rlookup_table);
/*
* This table is used to find the irq remapping table for a given device id
@@ -257,8 +255,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value, bool is_write);
static bool amd_iommu_pre_enabled = true;
@@ -268,7 +264,6 @@ bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
-EXPORT_SYMBOL(translation_pre_enabled);
static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
@@ -1717,53 +1712,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
-static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
- int retry;
+ u64 val;
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0, save_reg, save_src;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
- /* save the value to restore, if writable */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
- iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
- goto pc_false;
-
- /*
- * Disable power gating by programing the performance counter
- * source to 20 (i.e. counts the reads and writes from/to IOMMU
- * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
- * which never get incremented during this init phase.
- * (Note: The event is also deprecated.)
- */
- val = 20;
- if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
- goto pc_false;
-
- /* Check if the performance counters can be written to */
- val = 0xabcd;
- for (retry = 5; retry; retry--) {
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
- iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
- val2)
- break;
-
- /* Wait about 20 msec for power gating to disable and retry. */
- msleep(20);
- }
-
- /* restore */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
- iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
- goto pc_false;
-
- if (val != val2)
- goto pc_false;
-
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
@@ -1771,11 +1729,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
iommu->max_counters = (u8) ((val >> 7) & 0xf);
return;
-
-pc_false:
- pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
@@ -1837,7 +1790,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* IVHD and MMIO conflict.
*/
if (features != iommu->features)
- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
+ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
features, iommu->features);
}
@@ -1935,8 +1888,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
- iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
- iommu_device_register(&iommu->iommu);
+ iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
}
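This is the first use in this section of the consolidated registration interface that the rest of the series converts to: the ops and, optionally, the hardware device are now passed straight to iommu_device_register() instead of via the removed iommu_device_set_ops()/iommu_device_set_fwnode() setters. The prototype these hunks assume:

	int iommu_device_register(struct iommu_device *iommu,
				  const struct iommu_ops *ops,
				  struct device *hwdev);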
@@ -3277,7 +3229,6 @@ struct amd_iommu *get_amd_iommu(unsigned int idx)
return iommu;
return NULL;
}
-EXPORT_SYMBOL(get_amd_iommu);
/****************************************************************************
*
@@ -3359,7 +3310,6 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
-EXPORT_SYMBOL(amd_iommu_pc_get_reg);
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
@@ -3368,4 +3318,3 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
-EXPORT_SYMBOL(amd_iommu_pc_set_reg);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a69a8b573e40..80e8e1916dd1 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -290,15 +290,6 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
return true;
}
-static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
-{
- struct iommu_dev_data *dev_data;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
-
- return dev_data->errata & (1 << erratum) ? true : false;
-}
-
/*
* This function checks if the driver got a valid device from the caller to
* avoid dereferencing invalid pointers.
@@ -861,33 +852,58 @@ static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}
-static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
+/*
+ * Builds an invalidation address which is suitable for one page or multiple
+ * pages. Sets the size bit (S) as needed if more than one page is flushed.
+ */
+static inline u64 build_inv_address(u64 address, size_t size)
{
- u64 pages;
- bool s;
+ u64 pages, end, msb_diff;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = false;
- if (pages > 1) {
+ if (pages == 1)
+ return address & PAGE_MASK;
+
+ end = address + size - 1;
+
+ /*
+ * msb_diff holds the index of the most significant bit that
+ * flipped between the start and end.
+ */
+ msb_diff = fls64(end ^ address) - 1;
+
+ /*
+ * Bits 63:52 are sign extended. If for some reason bit 51 is different
+ * between the start and the end, invalidate everything.
+ */
+ if (unlikely(msb_diff > 51)) {
+ address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+ } else {
/*
- * If we have to flush more than one page, flush all
- * TLB entries for this domain
+ * The msb-bit must be clear on the address. Just set all the
+ * lower bits.
*/
- address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = true;
+ address |= (1ull << msb_diff) - 1;
}
+ /* Clear bits 11:0 */
address &= PAGE_MASK;
+ /* Set the size bit - we flush more than one 4kb page */
+ return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+ size_t size, u16 domid, int pde)
+{
+ u64 inv_address = build_inv_address(address, size);
+
memset(cmd, 0, sizeof(*cmd));
cmd->data[1] |= domid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] = lower_32_bits(inv_address);
+ cmd->data[3] = upper_32_bits(inv_address);
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (s) /* size bit - we flush more than one 4kb page */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
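A worked example of the encoding build_inv_address() produces, as a standalone user-space sketch (fls64() and PAGE_MASK are reimplemented locally, and only the multi-page, msb_diff <= 51 path is modelled; the size-mask OR is left to the caller as above): for 8 pages at address 0, end is 0x7fff, the highest differing bit is 14, and the result is 0x3000, i.e. bits 13:12 set with bit 14 clear, which the IOMMU decodes as a 2^3-page flush.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PAGE_MASK	(~((1ULL << PAGE_SHIFT) - 1))

	static unsigned int fls64(uint64_t x)
	{
		return 64 - __builtin_clzll(x);	/* x is non-zero here */
	}

	static uint64_t build_inv_address(uint64_t address, uint64_t size)
	{
		uint64_t end = address + size - 1;
		uint64_t msb_diff = fls64(end ^ address) - 1;

		address |= (1ULL << msb_diff) - 1;	/* set all lower bits */
		return address & PAGE_MASK;		/* clear bits 11:0 */
	}

	int main(void)
	{
		printf("%#llx\n",
		       (unsigned long long)build_inv_address(0x0, 0x8000));
		return 0;
	}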
@@ -895,32 +911,15 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size)
{
- u64 pages;
- bool s;
-
- pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = false;
-
- if (pages > 1) {
- /*
- * If we have to flush more than one page, flush all
- * TLB entries for this domain
- */
- address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = true;
- }
-
- address &= PAGE_MASK;
+ u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = devid;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] = lower_32_bits(inv_address);
+ cmd->data[3] = upper_32_bits(inv_address);
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
- if (s)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
@@ -1531,33 +1530,9 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
pci_disable_pasid(pdev);
}
-/* FIXME: Change generic reset-function to do the same */
-static int pri_reset_while_enabled(struct pci_dev *pdev)
-{
- u16 control;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return -EINVAL;
-
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- control |= PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
-
- return 0;
-}
-
static int pdev_iommuv2_enable(struct pci_dev *pdev)
{
- bool reset_enable;
- int reqs, ret;
-
- /* FIXME: Hardcode number of outstanding requests for now */
- reqs = 32;
- if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
- reqs = 1;
- reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+ int ret;
/* Only allow access to user-accessible pages */
ret = pci_enable_pasid(pdev, 0);
@@ -1570,16 +1545,11 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev)
goto out_err;
/* Enable PRI */
- ret = pci_enable_pri(pdev, reqs);
+ /* FIXME: Hardcode number of outstanding requests for now */
+ ret = pci_enable_pri(pdev, 32);
if (ret)
goto out_err;
- if (reset_enable) {
- ret = pri_reset_while_enabled(pdev);
- if (ret)
- goto out_err;
- }
-
ret = pci_enable_ats(pdev, PAGE_SHIFT);
if (ret)
goto out_err;
@@ -1715,9 +1685,6 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
return ERR_PTR(-ENODEV);
devid = get_device_id(dev);
- if (devid < 0)
- return ERR_PTR(devid);
-
iommu = amd_iommu_rlookup_table[devid];
if (dev_iommu_priv_get(dev))
@@ -1771,26 +1738,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
return acpihid_device_group(dev);
}
-static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- return -ENODEV;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = !amd_iommu_unmap_flush;
- return 0;
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
-}
-
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
@@ -1855,7 +1802,7 @@ int __init amd_iommu_init_dma_ops(void)
pr_info("IO/TLB flush on unmap enabled\n");
else
pr_info("Lazy IO/TLB flushing enabled\n");
-
+ iommu_set_dma_strict(amd_iommu_unmap_flush);
return 0;
}
@@ -2019,16 +1966,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+ int devid = get_device_id(dev);
struct amd_iommu *iommu;
- int devid;
if (!check_device(dev))
return;
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
if (dev_data->domain != NULL)
detach_device(dev);
@@ -2257,7 +2200,6 @@ const struct iommu_ops amd_iommu_ops = {
.release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
- .domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
@@ -2310,9 +2252,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
unsigned long flags;
int levels, ret;
- if (pasids <= 0 || pasids > (PASID_MASK + 1))
- return -EINVAL;
-
/* Number of GCR3 table levels required */
for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
levels += 1;
@@ -2563,52 +2502,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
}
EXPORT_SYMBOL(amd_iommu_complete_ppr);
-struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
-{
- struct protection_domain *pdomain;
- struct iommu_dev_data *dev_data;
- struct device *dev = &pdev->dev;
- struct iommu_domain *io_domain;
-
- if (!check_device(dev))
- return NULL;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- pdomain = dev_data->domain;
- io_domain = iommu_get_domain_for_dev(dev);
-
- if (pdomain == NULL && dev_data->defer_attach) {
- dev_data->defer_attach = false;
- pdomain = to_pdomain(io_domain);
- attach_device(dev, pdomain);
- }
-
- if (pdomain == NULL)
- return NULL;
-
- if (io_domain->type != IOMMU_DOMAIN_DMA)
- return NULL;
-
- /* Only return IOMMUv2 domains */
- if (!(pdomain->flags & PD_IOMMUV2_MASK))
- return NULL;
-
- return &pdomain->domain;
-}
-EXPORT_SYMBOL(amd_iommu_get_v2_domain);
-
-void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
-{
- struct iommu_dev_data *dev_data;
-
- if (!amd_iommu_v2_supported())
- return;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- dev_data->errata |= (1 << erratum);
-}
-EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
-
int amd_iommu_device_info(struct pci_dev *pdev,
struct amd_iommu_device_info *info)
{
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 8594b4a83043..54b2f27b81d4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -245,8 +245,6 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
break;
case CMDQ_OP_PREFETCH_CFG:
cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
- cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
- cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
break;
case CMDQ_OP_CFGI_CD:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
@@ -909,8 +907,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- for (i = 0; i < master->num_sids; i++) {
- cmd.cfgi.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.cfgi.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
}
@@ -1355,6 +1353,29 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
}
+__maybe_unused
+static struct arm_smmu_master *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+ struct rb_node *node;
+ struct arm_smmu_stream *stream;
+
+ lockdep_assert_held(&smmu->streams_mutex);
+
+ node = smmu->streams.rb_node;
+ while (node) {
+ stream = rb_entry(node, struct arm_smmu_stream, node);
+ if (stream->id < sid)
+ node = node->rb_right;
+ else if (stream->id > sid)
+ node = node->rb_left;
+ else
+ return stream->master;
+ }
+
+ return NULL;
+}
+
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
@@ -1588,8 +1609,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
- for (i = 0; i < master->num_sids; i++) {
- cmd.atc.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
}
@@ -1632,8 +1653,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
if (!master->ats_enabled)
continue;
- for (i = 0; i < master->num_sids; i++) {
- cmd.atc.sid = master->sids[i];
+ for (i = 0; i < master->num_streams; i++) {
+ cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
}
}
@@ -2017,7 +2038,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
- if (smmu_domain->non_strict)
+ if (!iommu_get_dma_strict(domain))
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2065,13 +2086,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
int i, j;
struct arm_smmu_device *smmu = master->smmu;
- for (i = 0; i < master->num_sids; ++i) {
- u32 sid = master->sids[i];
+ for (i = 0; i < master->num_streams; ++i) {
+ u32 sid = master->streams[i].id;
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
- if (master->sids[j] == sid)
+ if (master->streams[j].id == sid)
break;
if (j < i)
continue;
@@ -2305,6 +2326,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (!gather->pgsize)
+ return;
+
arm_smmu_tlb_inv_range_domain(gather->start,
gather->end - gather->start + 1,
gather->pgsize, true, smmu_domain);
@@ -2345,11 +2369,101 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+ struct arm_smmu_master *master)
+{
+ int i;
+ int ret = 0;
+ struct arm_smmu_stream *new_stream, *cur_stream;
+ struct rb_node **new_node, *parent_node = NULL;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
+ GFP_KERNEL);
+ if (!master->streams)
+ return -ENOMEM;
+ master->num_streams = fwspec->num_ids;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u32 sid = fwspec->ids[i];
+
+ new_stream = &master->streams[i];
+ new_stream->id = sid;
+ new_stream->master = master;
+
+ /*
+ * Check the SIDs are in range of the SMMU and our stream table
+ */
+ if (!arm_smmu_sid_in_range(smmu, sid)) {
+ ret = -ERANGE;
+ break;
+ }
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+ ret = arm_smmu_init_l2_strtab(smmu, sid);
+ if (ret)
+ break;
+ }
+
+ /* Insert into SID tree */
+ new_node = &(smmu->streams.rb_node);
+ while (*new_node) {
+ cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+ node);
+ parent_node = *new_node;
+ if (cur_stream->id > new_stream->id) {
+ new_node = &((*new_node)->rb_left);
+ } else if (cur_stream->id < new_stream->id) {
+ new_node = &((*new_node)->rb_right);
+ } else {
+ dev_warn(master->dev,
+ "stream %u already in tree\n",
+ cur_stream->id);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ if (ret)
+ break;
+
+ rb_link_node(&new_stream->node, parent_node, new_node);
+ rb_insert_color(&new_stream->node, &smmu->streams);
+ }
+
+ if (ret) {
+ for (i--; i >= 0; i--)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ kfree(master->streams);
+ }
+ mutex_unlock(&smmu->streams_mutex);
+
+ return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_master *master)
+{
+ int i;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+ if (!smmu || !master->streams)
+ return;
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++)
+ rb_erase(&master->streams[i].node, &smmu->streams);
+ mutex_unlock(&smmu->streams_mutex);
+
+ kfree(master->streams);
+}
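Both functions follow the standard kernel rb-tree idiom: descend from the root comparing rb_entry() keys to find the link slot, then rb_link_node() plus rb_insert_color() to splice and rebalance, with arm_smmu_find_master() above as the matching lookup. Condensed, the insert half is:

	struct rb_node **link = &smmu->streams.rb_node, *parent = NULL;

	while (*link) {
		struct arm_smmu_stream *cur =
			rb_entry(*link, struct arm_smmu_stream, node);

		parent = *link;
		if (cur->id > new_stream->id)
			link = &(*link)->rb_left;
		else if (cur->id < new_stream->id)
			link = &(*link)->rb_right;
		else
			return -EINVAL;	/* duplicate stream ID */
	}
	rb_link_node(&new_stream->node, parent, link);
	rb_insert_color(&new_stream->node, &smmu->streams);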
+
static struct iommu_ops arm_smmu_ops;
static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
- int i, ret;
+ int ret;
struct arm_smmu_device *smmu;
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -2370,29 +2484,15 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
- master->sids = fwspec->ids;
- master->num_sids = fwspec->num_ids;
INIT_LIST_HEAD(&master->bonds);
dev_iommu_priv_set(dev, master);
- /* Check the SIDs are in range of the SMMU and our stream table */
- for (i = 0; i < master->num_sids; i++) {
- u32 sid = master->sids[i];
-
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
- goto err_free_master;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- goto err_free_master;
- }
- }
+ ret = arm_smmu_insert_master(smmu, master);
+ if (ret)
+ goto err_free_master;
- master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+ device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
+ master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
/*
* Note that PASID must be enabled before, and disabled after ATS:
@@ -2428,6 +2528,7 @@ static void arm_smmu_release_device(struct device *dev)
WARN_ON(arm_smmu_master_sva_enabled(master));
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
+ arm_smmu_remove_master(master);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -2449,76 +2550,18 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
return group;
}
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
- default:
- return -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = smmu_domain->non_strict;
- return 0;
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
int ret = 0;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
mutex_lock(&smmu_domain->init_mutex);
-
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- break;
- default:
- ret = -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch(attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- smmu_domain->non_strict = *(int *)data;
- break;
- default:
- ret = -ENODEV;
- }
- break;
- default:
- ret = -EINVAL;
- }
-
-out_unlock:
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
mutex_unlock(&smmu_domain->init_mutex);
+
return ret;
}
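On the caller side (VFIO, at this point in the series), the per-attribute domain_set_attr() call is replaced by a dedicated core helper; assuming the iommu_enable_nesting() wrapper introduced along with this interface, the conversion looks like:

	/* was: iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &val) */
	ret = iommu_enable_nesting(domain);
	if (ret)
		pr_warn("nested translation not available\n");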
@@ -2619,8 +2662,7 @@ static struct iommu_ops arm_smmu_ops = {
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
- .domain_get_attr = arm_smmu_domain_get_attr,
- .domain_set_attr = arm_smmu_domain_set_attr,
+ .enable_nesting = arm_smmu_enable_nesting,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2632,6 +2674,7 @@ static struct iommu_ops arm_smmu_ops = {
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
+ .owner = THIS_MODULE,
};
/* Probing and initialisation functions */
@@ -2851,6 +2894,9 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
+ mutex_init(&smmu->streams_mutex);
+ smmu->streams = RB_ROOT;
+
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -3620,10 +3666,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- ret = iommu_device_register(&smmu->iommu);
+ ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
return ret;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index f985817c967a..46e8c49214a8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -115,7 +115,7 @@
#define GERROR_PRIQ_ABT_ERR (1 << 3)
#define GERROR_EVTQ_ABT_ERR (1 << 2)
#define GERROR_CMDQ_ERR (1 << 0)
-#define GERROR_ERR_MASK 0xfd
+#define GERROR_ERR_MASK 0x1fd
#define ARM_SMMU_GERRORN 0x64
@@ -410,8 +410,6 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_PREFETCH_CFG 0x1
struct {
u32 sid;
- u8 size;
- u64 addr;
} prefetch;
#define CMDQ_OP_CFGI_STE 0x3
@@ -639,6 +637,15 @@ struct arm_smmu_device {
/* IOMMU core code handle */
struct iommu_device iommu;
+
+ struct rb_root streams;
+ struct mutex streams_mutex;
+};
+
+struct arm_smmu_stream {
+ u32 id;
+ struct arm_smmu_master *master;
+ struct rb_node node;
};
/* SMMU private data for each master */
@@ -647,8 +654,8 @@ struct arm_smmu_master {
struct device *dev;
struct arm_smmu_domain *domain;
struct list_head domain_head;
- u32 *sids;
- unsigned int num_sids;
+ struct arm_smmu_stream *streams;
+ unsigned int num_streams;
bool ats_enabled;
bool sva_enabled;
struct list_head bonds;
@@ -668,7 +675,6 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
- bool non_strict;
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index d8c6bfde6a61..6f72c4d208ca 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -761,14 +761,17 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
+ if (!iommu_get_dma_strict(domain))
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
if (smmu->impl && smmu->impl->init_context) {
ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
if (ret)
goto out_clear_smmu;
}
- if (smmu_domain->pgtbl_cfg.quirks)
- pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
+ if (smmu_domain->pgtbl_quirks)
+ pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -1481,98 +1484,34 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
return group;
}
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
- switch(domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
- case DOMAIN_ATTR_IO_PGTABLE_CFG: {
- struct io_pgtable_domain_attr *pgtbl_cfg = data;
- *pgtbl_cfg = smmu_domain->pgtbl_cfg;
-
- return 0;
- }
- default:
- return -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
- bool non_strict = smmu_domain->pgtbl_cfg.quirks &
- IO_PGTABLE_QUIRK_NON_STRICT;
- *(int *)data = non_strict;
- return 0;
- }
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ return ret;
}
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
{
- int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret = 0;
mutex_lock(&smmu_domain->init_mutex);
-
- switch(domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- break;
- case DOMAIN_ATTR_IO_PGTABLE_CFG: {
- struct io_pgtable_domain_attr *pgtbl_cfg = data;
-
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- smmu_domain->pgtbl_cfg = *pgtbl_cfg;
- break;
- }
- default:
- ret = -ENODEV;
- }
- break;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- if (*(int *)data)
- smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
- else
- smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
- break;
- default:
- ret = -ENODEV;
- }
- break;
- default:
- ret = -EINVAL;
- }
-out_unlock:
+ if (smmu_domain->smmu)
+ ret = -EPERM;
+ else
+ smmu_domain->pgtbl_quirks = quirks;
mutex_unlock(&smmu_domain->init_mutex);
+
return ret;
}
@@ -1631,13 +1570,14 @@ static struct iommu_ops arm_smmu_ops = {
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
- .domain_get_attr = arm_smmu_domain_get_attr,
- .domain_set_attr = arm_smmu_domain_set_attr,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
+ .owner = THIS_MODULE,
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -2221,10 +2161,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
}
- iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- err = iommu_device_register(&smmu->iommu);
+ err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
return err;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index d2a2d1bc58ba..c31a59d35c64 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -364,7 +364,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- struct io_pgtable_domain_attr pgtbl_cfg;
+ unsigned long pgtbl_quirks;
const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 7f280c8d5c53..4294abe389b2 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -847,10 +847,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
return ret;
}
- iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
- iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);
-
- ret = iommu_device_register(&qcom_iommu->iommu);
+ ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
return ret;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index af765c813cc8..7bcdd1205535 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -52,15 +52,17 @@ struct iommu_dma_cookie {
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+bool iommu_dma_forcedac __read_mostly;
-void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
- struct iommu_domain *domain)
+static int __init iommu_dma_forcedac_setup(char *str)
{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
+ int ret = kstrtobool(str, &iommu_dma_forcedac);
- free_cpu_cached_iovas(cpu, iovad);
+ if (!ret && iommu_dma_forcedac)
+ pr_info("Forcing DAC for PCI devices\n");
+ return ret;
}
+early_param("iommu.forcedac", iommu_dma_forcedac_setup);
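Since the option is parsed with kstrtobool(), the usual boolean spellings are accepted on the kernel command line; for example, any of:

	iommu.forcedac=1
	iommu.forcedac=y
	iommu.forcedac=on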
static void iommu_dma_entry_dtor(unsigned long data)
{
@@ -304,10 +306,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
domain = cookie->fq_domain;
- /*
- * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
- * implies that ops->flush_iotlb_all must be non-NULL.
- */
+
domain->ops->flush_iotlb_all(domain);
}
@@ -334,7 +333,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
- int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -371,8 +369,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
init_iova_domain(iovad, 1UL << order, base_pfn);
if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
- !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
- attr) {
+ domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
iommu_dma_entry_dtor))
pr_warn("iova flush queue initialization failed\n");
@@ -444,7 +441,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
- if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+ if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
@@ -499,8 +496,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
phys_addr_t phys;
phys = iommu_iova_to_phys(domain, dma_addr);
@@ -510,8 +505,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
__iommu_dma_unmap(dev, dma_addr, size);
if (unlikely(is_swiotlb_buffer(phys)))
- swiotlb_tbl_unmap_single(dev, phys, size,
- iova_align(iovad, size), dir, attrs);
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -581,10 +575,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
- swiotlb_tbl_unmap_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+ swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
}
@@ -650,23 +642,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}
-/**
- * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
- * @dev: Device to allocate memory for. Must be a real device
- * attached to an iommu_dma_domain
- * @size: Size of buffer in bytes
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @prot: pgprot_t to use for the remapped mapping
- * @attrs: DMA attributes for this allocation
- *
- * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+/*
+ * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
- *
- * Return: Mapped virtual address, or NULL on failure.
*/
-static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
+ size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -676,11 +657,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
- struct sg_table sgt;
dma_addr_t iova;
- void *vaddr;
-
- *dma_handle = DMA_MAPPING_ERROR;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
@@ -707,41 +684,91 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
if (!iova)
goto out_free_pages;
- if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+ if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
int i;
- for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
< size)
goto out_free_sg;
+ sgt->sgl->dma_address = iova;
+ sgt->sgl->dma_length = size;
+ return pages;
+
+out_free_sg:
+ sg_free_table(sgt);
+out_free_iova:
+ iommu_dma_free_iova(cookie, iova, size, NULL);
+out_free_pages:
+ __iommu_dma_free_pages(pages, count);
+ return NULL;
+}
+
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+ unsigned long attrs)
+{
+ struct page **pages;
+ struct sg_table sgt;
+ void *vaddr;
+
+ pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
+ attrs);
+ if (!pages)
+ return NULL;
+ *dma_handle = sgt.sgl->dma_address;
+ sg_free_table(&sgt);
vaddr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!vaddr)
goto out_unmap;
-
- *dma_handle = iova;
- sg_free_table(&sgt);
return vaddr;
out_unmap:
- __iommu_dma_unmap(dev, iova, size);
-out_free_sg:
- sg_free_table(&sgt);
-out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
-out_free_pages:
- __iommu_dma_free_pages(pages, count);
+ __iommu_dma_unmap(dev, *dma_handle, size);
+ __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
return NULL;
}
+#ifdef CONFIG_DMA_REMAP
+static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
+ size_t size, enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct dma_sgt_handle *sh;
+
+ sh = kmalloc(sizeof(*sh), gfp);
+ if (!sh)
+ return NULL;
+
+ sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
+ PAGE_KERNEL, attrs);
+ if (!sh->pages) {
+ kfree(sh);
+ return NULL;
+ }
+ return &sh->sgt;
+}
+
+static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ struct dma_sgt_handle *sh = sgt_handle(sgt);
+
+ __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
+ __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
+ sg_free_table(&sh->sgt);
+}
+#endif /* CONFIG_DMA_REMAP */
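These two callbacks back the new dma_alloc_noncontiguous() API added elsewhere in this series. From a consumer's point of view the lifecycle is roughly as follows (a hedged sketch; dev and size assumed):

	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);	/* optional CPU view */
	/* ... device I/O via sgt->sgl ... */
	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);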
+
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
@@ -755,7 +782,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(phys, size, dir);
if (is_swiotlb_buffer(phys))
- swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
+ swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -768,7 +795,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (is_swiotlb_buffer(phys))
- swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+ swiotlb_sync_single_for_device(dev, phys, size, dir);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(phys, size, dir);
@@ -789,8 +816,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
if (is_swiotlb_buffer(sg_phys(sg)))
- swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
- dir, SYNC_FOR_CPU);
+ swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
+ sg->length, dir);
}
}
@@ -806,8 +833,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
for_each_sg(sgl, sg, nelems, i) {
if (is_swiotlb_buffer(sg_phys(sg)))
- swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
- dir, SYNC_FOR_DEVICE);
+ swiotlb_sync_single_for_device(dev, sg_phys(sg),
+ sg->length, dir);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
@@ -1258,6 +1285,10 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
+#ifdef CONFIG_DMA_REMAP
+ .alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
+ .free_noncontiguous = iommu_dma_free_noncontiguous,
+#endif
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index de324b4eedfe..7623d8c371f5 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -407,7 +407,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
struct sysmmu_drvdata *data = dev_id;
const struct sysmmu_fault_info *finfo;
unsigned int i, n, itype;
- sysmmu_iova_t fault_addr = -1;
+ sysmmu_iova_t fault_addr;
unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
@@ -630,10 +630,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
- iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
-
- ret = iommu_device_register(&data->iommu);
+ ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
if (ret)
return ret;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b9a974d97831..fc38b1fba7cf 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -63,19 +63,6 @@ static const struct of_device_id l3_device_ids[] = {
/* maximum subwindows permitted per liodn */
static u32 max_subwindow_count;
-/* Pool for fspi allocation */
-static struct gen_pool *spaace_pool;
-
-/**
- * pamu_get_max_subwin_cnt() - Return the maximum supported
- * subwindow count per liodn.
- *
- */
-u32 pamu_get_max_subwin_cnt(void)
-{
- return max_subwindow_count;
-}
-
/**
 * pamu_get_ppaace() - Return the primary PAACE
* @liodn: liodn PAACT index for desired PAACE
@@ -155,13 +142,6 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
return fls64(addrspace_size) - 2;
}
-/* Derive the PAACE window count encoding for the subwindow count */
-static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
-{
- /* window count is 2^(WCE+1) bytes */
- return __ffs(subwindow_cnt) - 1;
-}
-
/*
* Set the PAACE type as primary and set the coherency required domain
* attribute
@@ -175,88 +155,10 @@ static void pamu_init_ppaace(struct paace *ppaace)
}
/*
- * Set the PAACE type as secondary and set the coherency required domain
- * attribute.
- */
-static void pamu_init_spaace(struct paace *spaace)
-{
- set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
- set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
- PAACE_M_COHERENCE_REQ);
-}
-
-/*
- * Return the spaace (corresponding to the secondary window index)
- * for a particular ppaace.
- */
-static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
-{
- u32 subwin_cnt;
- struct paace *spaace = NULL;
-
- subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
-
- if (wnum < subwin_cnt)
- spaace = &spaact[paace->fspi + wnum];
- else
- pr_debug("secondary paace out of bounds\n");
-
- return spaace;
-}
-
-/**
- * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
- * required for primary PAACE in the secondary
- * PAACE table.
- * @subwin_cnt: Number of subwindows to be reserved.
- *
- * A PPAACE entry may have a number of associated subwindows. A subwindow
- * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
- * the index (fspi) of the first SPAACE entry in the SPAACT table. This
- * function returns the index of the first SPAACE entry. The remaining
- * SPAACE entries are reserved contiguously from that index.
- *
- * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
- * If no SPAACE entry is available or the allocator can not reserve the required
- * number of contiguous entries function returns ULONG_MAX indicating a failure.
- *
- */
-static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
-{
- unsigned long spaace_addr;
-
- spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
- if (!spaace_addr)
- return ULONG_MAX;
-
- return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
-}
-
-/* Release the subwindows reserved for a particular LIODN */
-void pamu_free_subwins(int liodn)
-{
- struct paace *ppaace;
- u32 subwin_cnt, size;
-
- ppaace = pamu_get_ppaace(liodn);
- if (!ppaace) {
- pr_debug("Invalid liodn entry\n");
- return;
- }
-
- if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
- subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
- size = (subwin_cnt - 1) * sizeof(struct paace);
- gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
- set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
- }
-}
-
-/*
 * Function used for updating the stash destination for the corresponding
* LIODN.
*/
-int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
+int pamu_update_paace_stash(int liodn, u32 value)
{
struct paace *paace;
@@ -265,11 +167,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
pr_debug("Invalid liodn entry\n");
return -ENOENT;
}
- if (subwin) {
- paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace)
- return -ENOENT;
- }
set_bf(paace->impl_attr, PAACE_IA_CID, value);
mb();
@@ -277,65 +174,20 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
return 0;
}
-/* Disable a subwindow corresponding to the LIODN */
-int pamu_disable_spaace(int liodn, u32 subwin)
-{
- struct paace *paace;
-
- paace = pamu_get_ppaace(liodn);
- if (!paace) {
- pr_debug("Invalid liodn entry\n");
- return -ENOENT;
- }
- if (subwin) {
- paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace)
- return -ENOENT;
- set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
- } else {
- set_bf(paace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_DENIED);
- }
-
- mb();
-
- return 0;
-}
-
/**
* pamu_config_paace() - Sets up PPAACE entry for specified liodn
*
* @liodn: Logical IO device number
- * @win_addr: starting address of DSA window
- * @win-size: size of DSA window
* @omi: Operation mapping index -- if ~omi == 0 then omi not defined
- * @rpn: real (true physical) page number
* @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
* stashid not defined
- * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
- * snoopid not defined
- * @subwin_cnt: number of sub-windows
* @prot: window permissions
*
* Returns 0 upon success else error code < 0 returned
*/
-int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
- u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
- u32 subwin_cnt, int prot)
+int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot)
{
struct paace *ppaace;
- unsigned long fspi;
-
- if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
- pr_debug("window size too small or not a power of two %pa\n",
- &win_size);
- return -EINVAL;
- }
-
- if (win_addr & (win_size - 1)) {
- pr_debug("window address is not aligned with window size\n");
- return -EINVAL;
- }
ppaace = pamu_get_ppaace(liodn);
if (!ppaace)
@@ -343,13 +195,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
/* window size is 2^(WSE+1) bytes */
set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
- map_addrspace_size_to_wse(win_size));
+ map_addrspace_size_to_wse(1ULL << 36));
pamu_init_ppaace(ppaace);
- ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
- set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
- (win_addr >> PAMU_PAGE_SHIFT));
+ ppaace->wbah = 0;
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
/* set up operation mapping if it's configured */
if (omi < OME_NUMBER_ENTRIES) {
@@ -364,120 +215,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
if (~stashid != 0)
set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
- /* configure snoop id */
- if (~snoopid != 0)
- ppaace->domain_attr.to_host.snpid = snoopid;
-
- if (subwin_cnt) {
- /* The first entry is in the primary PAACE instead */
- fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
- if (fspi == ULONG_MAX) {
- pr_debug("spaace indexes exhausted\n");
- return -EINVAL;
- }
-
- /* window count is 2^(WCE+1) bytes */
- set_bf(ppaace->impl_attr, PAACE_IA_WCE,
- map_subwindow_cnt_to_wce(subwin_cnt));
- set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
- ppaace->fspi = fspi;
- } else {
- set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
- ppaace->twbah = rpn >> 20;
- set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
- set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
- set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
- set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
- }
- mb();
-
- return 0;
-}
-
-/**
- * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
- *
- * @liodn: Logical IO device number
- * @subwin_cnt: number of sub-windows associated with dma-window
- * @subwin: subwindow index
- * @subwin_size: size of subwindow
- * @omi: Operation mapping index
- * @rpn: real (true physical) page number
- * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
- * snoopid not defined
- * @stashid: cache stash id for associated cpu
- * @enable: enable/disable subwindow after reconfiguration
- * @prot: sub window permissions
- *
- * Returns 0 upon success else error code < 0 returned
- */
-int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
- phys_addr_t subwin_size, u32 omi, unsigned long rpn,
- u32 snoopid, u32 stashid, int enable, int prot)
-{
- struct paace *paace;
-
- /* setup sub-windows */
- if (!subwin_cnt) {
- pr_debug("Invalid subwindow count\n");
- return -EINVAL;
- }
-
- paace = pamu_get_ppaace(liodn);
- if (subwin > 0 && subwin < subwin_cnt && paace) {
- paace = pamu_get_spaace(paace, subwin - 1);
-
- if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
- pamu_init_spaace(paace);
- set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
- }
- }
-
- if (!paace) {
- pr_debug("Invalid liodn entry\n");
- return -ENOENT;
- }
-
- if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
- pr_debug("subwindow size out of range, or not a power of 2\n");
- return -EINVAL;
- }
-
- if (rpn == ULONG_MAX) {
- pr_debug("real page number out of range\n");
- return -EINVAL;
- }
-
- /* window size is 2^(WSE+1) bytes */
- set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
- map_addrspace_size_to_wse(subwin_size));
-
- set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
- paace->twbah = rpn >> 20;
- set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
- set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
-
- /* configure snoop id */
- if (~snoopid != 0)
- paace->domain_attr.to_host.snpid = snoopid;
-
- /* set up operation mapping if it's configured */
- if (omi < OME_NUMBER_ENTRIES) {
- set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
- paace->op_encode.index_ot.omi = omi;
- } else if (~omi != 0) {
- pr_debug("bad operation mapping index: %d\n", omi);
- return -EINVAL;
- }
-
- if (~stashid != 0)
- set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
-
- smp_wmb();
-
- if (enable)
- set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
-
+ set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
+ ppaace->twbah = 0;
+ set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, 0);
+ set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
+ set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
mb();
return 0;
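With windowing gone, the single PPAACE now spans the entire 36-bit DMA address space. A quick standalone check of the WSE encoding used above (window size is 2^(WSE+1) bytes; fls64() reimplemented locally):

	#include <stdio.h>
	#include <stdint.h>

	static unsigned int fls64(uint64_t x)
	{
		return 64 - __builtin_clzll(x);
	}

	int main(void)
	{
		/* map_addrspace_size_to_wse(1ULL << 36) */
		unsigned int wse = fls64(1ULL << 36) - 2;

		printf("WSE=%u -> 2^%u bytes\n", wse, wse + 1);	/* 35 -> 2^36 */
		return 0;
	}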
@@ -1129,17 +872,6 @@ static int fsl_pamu_probe(struct platform_device *pdev)
spaact_phys = virt_to_phys(spaact);
omt_phys = virt_to_phys(omt);
- spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
- if (!spaace_pool) {
- ret = -ENOMEM;
- dev_err(dev, "Failed to allocate spaace gen pool\n");
- goto error;
- }
-
- ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
- if (ret)
- goto error_genpool;
-
pamubypenr = in_be32(&guts_regs->pamubypenr);
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
@@ -1167,9 +899,6 @@ static int fsl_pamu_probe(struct platform_device *pdev)
return 0;
-error_genpool:
- gen_pool_destroy(spaace_pool);
-
error:
if (irq != NO_IRQ)
free_irq(irq, data);
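
Note on the PAACE encodings used in the hunks above: a window occupies 2^(WSE+1) bytes and the subwindow count is 2^(WCE+1). A minimal sketch of the size-to-WSE mapping, assuming a power-of-two input as the surrounding checks enforce (the helper name is illustrative; the driver's own map_addrspace_size_to_wse()/map_subwindow_cnt_to_wce() play this role):

    #include <linux/log2.h>

    /* WSE encodes a window of 2^(WSE+1) bytes, so WSE = log2(size) - 1;
     * a 4 KiB window (PAMU_PAGE_SIZE) encodes as WSE = 11.
     */
    static inline u32 size_to_wse(phys_addr_t size)
    {
    	/* caller guarantees: power of two, >= PAMU_PAGE_SIZE */
    	return ilog2(size) - 1;
    }
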
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index e1496ba96160..36df7975ff64 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -383,18 +383,10 @@ struct ome {
int pamu_domain_init(void);
int pamu_enable_liodn(int liodn);
int pamu_disable_liodn(int liodn);
-void pamu_free_subwins(int liodn);
-int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
- u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
- u32 subwin_cnt, int prot);
-int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
- phys_addr_t subwin_size, u32 omi, unsigned long rpn,
- uint32_t snoopid, u32 stashid, int enable, int prot);
+int pamu_config_ppaace(int liodn, u32 omi, uint32_t stashid, int prot);
u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
void get_ome_index(u32 *omi_index, struct device *dev);
-int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
-int pamu_disable_spaace(int liodn, u32 subwin);
-u32 pamu_get_max_subwin_cnt(void);
+int pamu_update_paace_stash(int liodn, u32 value);
#endif /* __FSL_PAMU_H */
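
In the slimmed-down interface, omi and stashid keep the driver's all-ones sentinel for "not configured", tested with the ~x != 0 idiom visible in the removed subwindow code above. A hedged caller sketch of the new four-argument pamu_config_ppaace(); the liodn value is assumed:

    /* Illustrative call: no operation mapping, no stashing, full R/W. */
    ret = pamu_config_ppaace(liodn, ~(u32)0, ~(u32)0,
    			     PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
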
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index b2110767caf4..a47f47307109 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -54,159 +54,18 @@ static int __init iommu_init_mempool(void)
return 0;
}
-static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
-{
- u32 win_cnt = dma_domain->win_cnt;
- struct dma_window *win_ptr = &dma_domain->win_arr[0];
- struct iommu_domain_geometry *geom;
-
- geom = &dma_domain->iommu_domain.geometry;
-
- if (!win_cnt || !dma_domain->geom_size) {
- pr_debug("Number of windows/geometry not configured for the domain\n");
- return 0;
- }
-
- if (win_cnt > 1) {
- u64 subwin_size;
- dma_addr_t subwin_iova;
- u32 wnd;
-
- subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
- subwin_iova = iova & ~(subwin_size - 1);
- wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
- win_ptr = &dma_domain->win_arr[wnd];
- }
-
- if (win_ptr->valid)
- return win_ptr->paddr + (iova & (win_ptr->size - 1));
-
- return 0;
-}
-
-static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
-{
- struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
- int i, ret;
- unsigned long rpn, flags;
-
- for (i = 0; i < dma_domain->win_cnt; i++) {
- if (sub_win_ptr[i].valid) {
- rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
- spin_lock_irqsave(&iommu_lock, flags);
- ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
- sub_win_ptr[i].size,
- ~(u32)0,
- rpn,
- dma_domain->snoop_id,
- dma_domain->stash_id,
- (i > 0) ? 1 : 0,
- sub_win_ptr[i].prot);
- spin_unlock_irqrestore(&iommu_lock, flags);
- if (ret) {
- pr_debug("SPAACE configuration failed for liodn %d\n",
- liodn);
- return ret;
- }
- }
- }
-
- return ret;
-}
-
-static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
-{
- int ret;
- struct dma_window *wnd = &dma_domain->win_arr[0];
- phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu_lock, flags);
- ret = pamu_config_ppaace(liodn, wnd_addr,
- wnd->size,
- ~(u32)0,
- wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
- spin_unlock_irqrestore(&iommu_lock, flags);
- if (ret)
- pr_debug("PAACE configuration failed for liodn %d\n", liodn);
-
- return ret;
-}
-
-/* Map the DMA window corresponding to the LIODN */
-static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
-{
- if (dma_domain->win_cnt > 1)
- return map_subwins(liodn, dma_domain);
- else
- return map_win(liodn, dma_domain);
-}
-
-/* Update window/subwindow mapping for the LIODN */
-static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
-{
- int ret;
- struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
- unsigned long flags;
-
- spin_lock_irqsave(&iommu_lock, flags);
- if (dma_domain->win_cnt > 1) {
- ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
- wnd->size,
- ~(u32)0,
- wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id,
- dma_domain->stash_id,
- (wnd_nr > 0) ? 1 : 0,
- wnd->prot);
- if (ret)
- pr_debug("Subwindow reconfiguration failed for liodn %d\n",
- liodn);
- } else {
- phys_addr_t wnd_addr;
-
- wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
-
- ret = pamu_config_ppaace(liodn, wnd_addr,
- wnd->size,
- ~(u32)0,
- wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
- if (ret)
- pr_debug("Window reconfiguration failed for liodn %d\n",
- liodn);
- }
-
- spin_unlock_irqrestore(&iommu_lock, flags);
-
- return ret;
-}
-
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
u32 val)
{
- int ret = 0, i;
+ int ret = 0;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
- liodn);
+ ret = pamu_update_paace_stash(liodn, val);
+ if (ret) {
+ pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
- return -EINVAL;
- }
-
- for (i = 0; i < dma_domain->win_cnt; i++) {
- ret = pamu_update_paace_stash(liodn, i, val);
- if (ret) {
- pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
- i, liodn);
- spin_unlock_irqrestore(&iommu_lock, flags);
- return ret;
- }
+ return ret;
}
spin_unlock_irqrestore(&iommu_lock, flags);
@@ -215,16 +74,12 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
}
/* Set the geometry parameters for a LIODN */
-static int pamu_set_liodn(int liodn, struct device *dev,
- struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
+static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
+ int liodn)
{
- phys_addr_t window_addr, window_size;
- phys_addr_t subwin_size;
- int ret = 0, i;
u32 omi_index = ~(u32)0;
unsigned long flags;
+ int ret;
/*
* Configure the omi_index at the geometry setup time.
@@ -233,93 +88,30 @@ static int pamu_set_liodn(int liodn, struct device *dev,
*/
get_ome_index(&omi_index, dev);
- window_addr = geom_attr->aperture_start;
- window_size = dma_domain->geom_size;
-
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_disable_liodn(liodn);
- if (!ret)
- ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
- 0, dma_domain->snoop_id,
- dma_domain->stash_id, win_cnt, 0);
+ if (ret)
+ goto out_unlock;
+ ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
+ if (ret)
+ goto out_unlock;
+ ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
+ PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
+out_unlock:
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
- liodn, win_cnt);
- return ret;
- }
-
- if (win_cnt > 1) {
- subwin_size = window_size >> ilog2(win_cnt);
- for (i = 0; i < win_cnt; i++) {
- spin_lock_irqsave(&iommu_lock, flags);
- ret = pamu_disable_spaace(liodn, i);
- if (!ret)
- ret = pamu_config_spaace(liodn, win_cnt, i,
- subwin_size, omi_index,
- 0, dma_domain->snoop_id,
- dma_domain->stash_id,
- 0, 0);
- spin_unlock_irqrestore(&iommu_lock, flags);
- if (ret) {
- pr_debug("SPAACE configuration failed for liodn %d\n",
- liodn);
- return ret;
- }
- }
+ pr_debug("PAACE configuration failed for liodn %d\n",
+ liodn);
}
-
return ret;
}
-static int check_size(u64 size, dma_addr_t iova)
-{
- /*
- * Size must be a power of two and at least be equal
- * to PAMU page size.
- */
- if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
- pr_debug("Size too small or not a power of two\n");
- return -EINVAL;
- }
-
- /* iova must be page size aligned */
- if (iova & (size - 1)) {
- pr_debug("Address is not aligned with window size\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
-{
- struct fsl_dma_domain *domain;
-
- domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
- if (!domain)
- return NULL;
-
- domain->stash_id = ~(u32)0;
- domain->snoop_id = ~(u32)0;
- domain->win_cnt = pamu_get_max_subwin_cnt();
- domain->geom_size = 0;
-
- INIT_LIST_HEAD(&domain->devices);
-
- spin_lock_init(&domain->domain_lock);
-
- return domain;
-}
-
-static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
+static void remove_device_ref(struct device_domain_info *info)
{
unsigned long flags;
list_del(&info->link);
spin_lock_irqsave(&iommu_lock, flags);
- if (win_cnt > 1)
- pamu_free_subwins(info->liodn);
pamu_disable_liodn(info->liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
spin_lock_irqsave(&device_domain_lock, flags);
@@ -337,7 +129,7 @@ static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
/* Remove the device from the domain device list */
list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
if (!dev || (info->dev == dev))
- remove_device_ref(info, dma_domain->win_cnt);
+ remove_device_ref(info);
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
@@ -379,13 +171,10 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
-
if (iova < domain->geometry.aperture_start ||
iova > domain->geometry.aperture_end)
return 0;
-
- return get_phys_addr(dma_domain, iova);
+ return iova;
}
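
With windows removed, a PAMU domain is a fixed identity mapping over its 36-bit aperture, so iova_to_phys() reduces to a range check plus pass-through. A hedged usage sketch via the generic API (the domain pointer is assumed):

    /* In-range IOVAs translate 1:1; out-of-aperture IOVAs return 0. */
    phys_addr_t p = iommu_iova_to_phys(domain, 0x1000);     /* 0x1000 */
    phys_addr_t q = iommu_iova_to_phys(domain, 1ULL << 40); /* 0 */
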
static bool fsl_pamu_capable(enum iommu_cap cap)
@@ -399,10 +188,6 @@ static void fsl_pamu_domain_free(struct iommu_domain *domain)
/* remove all the devices from the device list */
detach_device(NULL, dma_domain);
-
- dma_domain->enabled = 0;
- dma_domain->mapped = 0;
-
kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
@@ -413,12 +198,15 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
- dma_domain = iommu_alloc_dma_domain();
- if (!dma_domain) {
- pr_debug("dma_domain allocation failed\n");
+ dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
+ if (!dma_domain)
return NULL;
- }
- /* defaul geometry 64 GB i.e. maximum system address */
+
+ dma_domain->stash_id = ~(u32)0;
+ INIT_LIST_HEAD(&dma_domain->devices);
+ spin_lock_init(&dma_domain->domain_lock);
+
+ /* default geometry 64 GB i.e. maximum system address */
dma_domain->iommu_domain.geometry.aperture_start = 0;
dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
dma_domain->iommu_domain.geometry.force_aperture = true;
@@ -426,24 +214,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
return &dma_domain->iommu_domain;
}
-/* Configure geometry settings for all LIODNs associated with domain */
-static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
- geom_attr, win_cnt);
- if (ret)
- break;
- }
-
- return ret;
-}
-
/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
@@ -459,198 +229,13 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
return ret;
}
-/* Update domain mappings for all LIODNs associated with the domain */
-static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = update_liodn(info->liodn, dma_domain, wnd_nr);
- if (ret)
- break;
- }
- return ret;
-}
-
-static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
- ret = pamu_disable_liodn(info->liodn);
- if (!ret)
- dma_domain->enabled = 0;
- } else {
- ret = pamu_disable_spaace(info->liodn, wnd_nr);
- }
- }
-
- return ret;
-}
-
-static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Number of windows not configured\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return;
- }
-
- if (wnd_nr >= dma_domain->win_cnt) {
- pr_debug("Invalid window index\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return;
- }
-
- if (dma_domain->win_arr[wnd_nr].valid) {
- ret = disable_domain_win(dma_domain, wnd_nr);
- if (!ret) {
- dma_domain->win_arr[wnd_nr].valid = 0;
- dma_domain->mapped--;
- }
- }
-
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-}
-
-static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- struct dma_window *wnd;
- int pamu_prot = 0;
- int ret;
- unsigned long flags;
- u64 win_size;
-
- if (prot & IOMMU_READ)
- pamu_prot |= PAACE_AP_PERMS_QUERY;
- if (prot & IOMMU_WRITE)
- pamu_prot |= PAACE_AP_PERMS_UPDATE;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Number of windows not configured\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENODEV;
- }
-
- if (wnd_nr >= dma_domain->win_cnt) {
- pr_debug("Invalid window index\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
- if (size > win_size) {
- pr_debug("Invalid window size\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- if (dma_domain->win_cnt == 1) {
- if (dma_domain->enabled) {
- pr_debug("Disable the window before updating the mapping\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- ret = check_size(size, domain->geometry.aperture_start);
- if (ret) {
- pr_debug("Aperture start not aligned to the size\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
- }
-
- wnd = &dma_domain->win_arr[wnd_nr];
- if (!wnd->valid) {
- wnd->paddr = paddr;
- wnd->size = size;
- wnd->prot = pamu_prot;
-
- ret = update_domain_mapping(dma_domain, wnd_nr);
- if (!ret) {
- wnd->valid = 1;
- dma_domain->mapped++;
- }
- } else {
- pr_debug("Disable the window before updating the mapping\n");
- ret = -EBUSY;
- }
-
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-/*
- * Attach the LIODN to the DMA domain and configure the geometry
- * and window mappings.
- */
-static int handle_attach_device(struct fsl_dma_domain *dma_domain,
- struct device *dev, const u32 *liodn,
- int num)
-{
- unsigned long flags;
- struct iommu_domain *domain = &dma_domain->iommu_domain;
- int ret = 0;
- int i;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- for (i = 0; i < num; i++) {
- /* Ensure that LIODN value is valid */
- if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
- pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
- liodn[i], dev->of_node);
- ret = -EINVAL;
- break;
- }
-
- attach_device(dma_domain, liodn[i], dev);
- /*
- * Check if geometry has already been configured
- * for the domain. If yes, set the geometry for
- * the LIODN.
- */
- if (dma_domain->win_arr) {
- u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
-
- ret = pamu_set_liodn(liodn[i], dev, dma_domain,
- &domain->geometry, win_cnt);
- if (ret)
- break;
- if (dma_domain->mapped) {
- /*
- * Create window/subwindow mapping for
- * the LIODN.
- */
- ret = map_liodn(liodn[i], dma_domain);
- if (ret)
- break;
- }
- }
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
static int fsl_pamu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+ unsigned long flags;
+ int len, ret = 0, i;
const u32 *liodn;
- u32 liodn_cnt;
- int len, ret = 0;
struct pci_dev *pdev = NULL;
struct pci_controller *pci_ctl;
@@ -670,14 +255,30 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
}
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
- if (liodn) {
- liodn_cnt = len / sizeof(u32);
- ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
- } else {
+ if (!liodn) {
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
- ret = -EINVAL;
+ return -EINVAL;
}
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ for (i = 0; i < len / sizeof(u32); i++) {
+ /* Ensure that LIODN value is valid */
+ if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
+ pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
+ liodn[i], dev->of_node);
+ ret = -EINVAL;
+ break;
+ }
+
+ attach_device(dma_domain, liodn[i], dev);
+ ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
+ if (ret)
+ break;
+ ret = pamu_enable_liodn(liodn[i]);
+ if (ret)
+ break;
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return ret;
}
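
The rewritten attach path derives the LIODN count directly from the property length, since "fsl,liodn" is an array of u32 cells. A worked example, assuming a device-tree node of the following shape:

    /* fsl,liodn = <64 65>;  =>  of_get_property() sets len = 8,
     * so the loop above visits len / sizeof(u32) == 2 LIODNs.
     */
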
@@ -712,202 +313,26 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}
-static int configure_domain_geometry(struct iommu_domain *domain, void *data)
-{
- struct iommu_domain_geometry *geom_attr = data;
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- dma_addr_t geom_size;
- unsigned long flags;
-
- geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
- /*
- * Sanity check the geometry size. Also, we do not support
- * DMA outside of the geometry.
- */
- if (check_size(geom_size, geom_attr->aperture_start) ||
- !geom_attr->force_aperture) {
- pr_debug("Invalid PAMU geometry attributes\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Copy the domain geometry information */
- memcpy(&domain->geometry, geom_attr,
- sizeof(struct iommu_domain_geometry));
- dma_domain->geom_size = geom_size;
-
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return 0;
-}
-
/* Set the domain stash attribute */
-static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
+int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
- struct pamu_stash_attribute *stash_attr = data;
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
spin_lock_irqsave(&dma_domain->domain_lock, flags);
-
- memcpy(&dma_domain->dma_stash, stash_attr,
- sizeof(struct pamu_stash_attribute));
-
- dma_domain->stash_id = get_stash_id(stash_attr->cache,
- stash_attr->cpu);
+ dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
if (dma_domain->stash_id == ~(u32)0) {
pr_debug("Invalid stash attributes\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
-
ret = update_domain_stash(dma_domain, dma_domain->stash_id);
-
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return ret;
}
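
The attribute-based stash configuration is folded into a single exported helper that always targets the L1 cache of the given CPU. A hedged caller sketch; the domain pointer and CPU id are assumptions:

    /* Direct the domain's PAMU stash at CPU 2's L1 cache. */
    ret = fsl_pamu_configure_l1_stash(domain, 2);
    if (ret)
    	pr_warn("stash setup failed for domain: %d\n", ret);
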
-/* Configure domain dma state i.e. enable/disable DMA */
-static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
-{
- struct device_domain_info *info;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
-
- if (enable && !dma_domain->mapped) {
- pr_debug("Can't enable DMA domain without valid mapping\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENODEV;
- }
-
- dma_domain->enabled = enable;
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = (enable) ? pamu_enable_liodn(info->liodn) :
- pamu_disable_liodn(info->liodn);
- if (ret)
- pr_debug("Unable to set dma state for liodn %d",
- info->liodn);
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return 0;
-}
-
-static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Ensure that the geometry has been set for the domain */
- if (!dma_domain->geom_size) {
- pr_debug("Please configure geometry before setting the number of windows\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- /*
- * Ensure we have valid window count i.e. it should be less than
- * maximum permissible limit and should be a power of two.
- */
- if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
- pr_debug("Invalid window count\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- w_count > 1 ? w_count : 0);
- if (!ret) {
- kfree(dma_domain->win_arr);
- dma_domain->win_arr = kcalloc(w_count,
- sizeof(*dma_domain->win_arr),
- GFP_ATOMIC);
- if (!dma_domain->win_arr) {
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENOMEM;
- }
- dma_domain->win_cnt = w_count;
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- int ret = 0;
-
- switch (attr_type) {
- case DOMAIN_ATTR_GEOMETRY:
- ret = configure_domain_geometry(domain, data);
- break;
- case DOMAIN_ATTR_FSL_PAMU_STASH:
- ret = configure_domain_stash(dma_domain, data);
- break;
- case DOMAIN_ATTR_FSL_PAMU_ENABLE:
- ret = configure_domain_dma_state(dma_domain, *(int *)data);
- break;
- case DOMAIN_ATTR_WINDOWS:
- ret = fsl_pamu_set_windows(domain, *(u32 *)data);
- break;
- default:
- pr_debug("Unsupported attribute type\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- int ret = 0;
-
- switch (attr_type) {
- case DOMAIN_ATTR_FSL_PAMU_STASH:
- memcpy(data, &dma_domain->dma_stash,
- sizeof(struct pamu_stash_attribute));
- break;
- case DOMAIN_ATTR_FSL_PAMU_ENABLE:
- *(int *)data = dma_domain->enabled;
- break;
- case DOMAIN_ATTR_FSL_PAMUV1:
- *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
- break;
- case DOMAIN_ATTR_WINDOWS:
- *(u32 *)data = dma_domain->win_cnt;
- break;
- default:
- pr_debug("Unsupported attribute type\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
struct iommu_group *group;
@@ -1031,11 +456,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
- .domain_window_enable = fsl_pamu_window_enable,
- .domain_window_disable = fsl_pamu_window_disable,
.iova_to_phys = fsl_pamu_iova_to_phys,
- .domain_set_attr = fsl_pamu_set_domain_attr,
- .domain_get_attr = fsl_pamu_get_domain_attr,
.probe_device = fsl_pamu_probe_device,
.release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
@@ -1053,9 +474,7 @@ int __init pamu_domain_init(void)
if (ret)
return ret;
- iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
-
- ret = iommu_device_register(&pamu_iommu);
+ ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
if (ret) {
iommu_device_sysfs_remove(&pamu_iommu);
pr_err("Can't register iommu device\n");
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
index 2865d42782e8..95ac1b3cab3b 100644
--- a/drivers/iommu/fsl_pamu_domain.h
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -9,56 +9,10 @@
#include "fsl_pamu.h"
-struct dma_window {
- phys_addr_t paddr;
- u64 size;
- int valid;
- int prot;
-};
-
struct fsl_dma_domain {
- /*
- * Indicates the geometry size for the domain.
- * This would be set when the geometry is
- * configured for the domain.
- */
- dma_addr_t geom_size;
- /*
- * Number of windows assocaited with this domain.
- * During domain initialization, it is set to the
- * the maximum number of subwindows allowed for a LIODN.
- * Minimum value for this is 1 indicating a single PAMU
- * window, without any sub windows. Value can be set/
- * queried by set_attr/get_attr API for DOMAIN_ATTR_WINDOWS.
- * Value can only be set once the geometry has been configured.
- */
- u32 win_cnt;
- /*
- * win_arr contains information of the configured
- * windows for a domain. This is allocated only
- * when the number of windows for the domain are
- * set.
- */
- struct dma_window *win_arr;
/* list of devices associated with the domain */
struct list_head devices;
- /* dma_domain states:
- * mapped - A particular mapping has been created
- * within the configured geometry.
- * enabled - DMA has been enabled for the given
- * domain. This translates to setting of the
- * valid bit for the primary PAACE in the PAMU
- * PAACT table. Domain geometry should be set and
- * it must have a valid mapping before DMA can be
- * enabled for it.
- *
- */
- int mapped;
- int enabled;
- /* stash_id obtained from the stash attribute details */
u32 stash_id;
- struct pamu_stash_attribute dma_stash;
- u32 snoop_id;
struct iommu_domain iommu_domain;
spinlock_t domain_lock;
};
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index d5c51b5c20af..1757ac1e1623 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1140,9 +1140,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (err)
goto err_unmap;
- iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
-
- err = iommu_device_register(&iommu->iommu);
+ err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
if (err)
goto err_unmap;
}
@@ -1205,6 +1203,63 @@ static inline void reclaim_free_desc(struct q_inval *qi)
}
}
+static const char *qi_type_string(u8 type)
+{
+ switch (type) {
+ case QI_CC_TYPE:
+ return "Context-cache Invalidation";
+ case QI_IOTLB_TYPE:
+ return "IOTLB Invalidation";
+ case QI_DIOTLB_TYPE:
+ return "Device-TLB Invalidation";
+ case QI_IEC_TYPE:
+ return "Interrupt Entry Cache Invalidation";
+ case QI_IWD_TYPE:
+ return "Invalidation Wait";
+ case QI_EIOTLB_TYPE:
+ return "PASID-based IOTLB Invalidation";
+ case QI_PC_TYPE:
+ return "PASID-cache Invalidation";
+ case QI_DEIOTLB_TYPE:
+ return "PASID-based Device-TLB Invalidation";
+ case QI_PGRP_RESP_TYPE:
+ return "Page Group Response";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
+{
+ unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
+ u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
+ struct qi_desc *desc = iommu->qi->desc + head;
+
+ if (fault & DMA_FSTS_IQE)
+ pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
+ DMAR_IQER_REG_IQEI(iqe_err));
+ if (fault & DMA_FSTS_ITE)
+ pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
+ DMAR_IQER_REG_ITESID(iqe_err));
+ if (fault & DMA_FSTS_ICE)
+ pr_err("VT-d detected Invalidation Completion Error: SID %llx",
+ DMAR_IQER_REG_ICESID(iqe_err));
+
+ pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
+ qi_type_string(desc->qw0 & 0xf),
+ (unsigned long long)desc->qw0,
+ (unsigned long long)desc->qw1);
+
+ head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
+ head <<= qi_shift(iommu);
+ desc = iommu->qi->desc + head;
+
+ pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
+ qi_type_string(desc->qw0 & 0xf),
+ (unsigned long long)desc->qw0,
+ (unsigned long long)desc->qw1);
+}
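
qi_dump_fault() steps one slot back from the queue head to show the descriptor preceding the faulting one. A worked sketch of that index arithmetic, assuming for illustration a 256-entry ring (QI_LENGTH) and 16-byte descriptors (qi_shift() == 4):

    unsigned int idx  = head >> 4;             /* byte offset -> slot  */
    unsigned int prev = (idx + 256 - 1) % 256; /* wrap-safe idx - 1    */
    head = prev << 4;                          /* slot -> byte offset  */
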
+
static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
@@ -1216,6 +1271,8 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
return -EAGAIN;
fault = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
+ qi_dump_fault(iommu, fault);
/*
* If IQE happens, the head points to the descriptor associated
@@ -1232,12 +1289,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
* used by software as private data. We won't print
* out these two qw's for security consideration.
*/
- pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
- (unsigned long long)desc->qw0,
- (unsigned long long)desc->qw1);
memcpy(desc, qi->desc + (wait_index << shift),
1 << shift);
writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+ pr_info("Invalidation Queue Error (IQE) cleared\n");
return -EINVAL;
}
}
@@ -1254,6 +1309,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+ pr_info("Invalidation Time-out Error (ITE) cleared\n");
do {
if (qi->desc_status[head] == QI_IN_USE)
@@ -1265,8 +1321,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
return -EAGAIN;
}
- if (fault & DMA_FSTS_ICE)
+ if (fault & DMA_FSTS_ICE) {
writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+ pr_info("Invalidation Completion Error (ICE) cleared\n");
+ }
return 0;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index ee0932307d64..708f430af1c4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
-static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
@@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
dmar_map_gfx = 0;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
- pr_info("Forcing DAC for PCI devices\n");
- dmar_forcedac = 1;
+ pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
+ iommu_dma_forcedac = true;
} else if (!strncmp(str, "strict", 6)) {
pr_info("Disable batched IOTLB flush\n");
intel_iommu_strict = 1;
@@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (iommu != skip) {
- if (!ecap_sc_support(iommu->ecap)) {
+ /*
+ * If the hardware is operating in the scalable mode,
+ * the snooping control is always supported since we
+ * always set PASID-table-entry.PGSNP bit if the domain
+ * is managed outside (UNMANAGED).
+ */
+ if (!sm_supported(iommu) &&
+ !ecap_sc_support(iommu->ecap)) {
ret = 0;
break;
}
@@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
readl, (sts & DMA_GSTS_RTPS), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ if (sm_supported(iommu))
+ qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -2289,6 +2300,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
return level;
}
+/*
+ * Ensure that old small page tables are removed to make room for superpage(s).
+ * We're going to add new large pages, so make sure we don't remove their parent
+ * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
+ */
+static void switch_to_super_page(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long end_pfn, int level)
+{
+ unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct dma_pte *pte = NULL;
+ int i;
+
+ while (start_pfn <= end_pfn) {
+ if (!pte)
+ pte = pfn_to_dma_pte(domain, start_pfn, &level);
+
+ if (dma_pte_present(pte)) {
+ dma_pte_free_pagetable(domain, start_pfn,
+ start_pfn + lvl_pages - 1,
+ level + 1);
+
+ for_each_domain_iommu(i, domain)
+ iommu_flush_iotlb_psi(g_iommus[i], domain,
+ start_pfn, lvl_pages,
+ 0, 0);
+ }
+
+ pte++;
+ start_pfn += lvl_pages;
+ if (first_pte_in_page(pte))
+ pte = NULL;
+ }
+}
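
__domain_mapping() below clips the mapped range to whole superpages with level_mask() before calling switch_to_super_page(). A worked example of that end_pfn computation, assuming a level-2 (512-page, 2 MiB) superpage:

    /* iov_pfn = 0x200, nr_pages = 0x600; level_mask(2) clears the low
     * 9 pfn bits: (0x200 + 0x600) & ~0x1ffUL = 0x800, so end_pfn = 0x7ff,
     * i.e. exactly three 512-page superpages starting at pfn 0x200.
     */
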
+
static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
@@ -2305,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -EINVAL;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+ attr |= DMA_FL_PTE_PRESENT;
if (domain_use_first_level(domain)) {
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (domain->domain.type == IOMMU_DOMAIN_DMA) {
attr |= DMA_FL_PTE_ACCESS;
@@ -2329,22 +2376,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -ENOMEM;
/* It is large page*/
if (largepage_lvl > 1) {
- unsigned long nr_superpages, end_pfn;
+ unsigned long end_pfn;
pteval |= DMA_PTE_LARGE_PAGE;
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
- nr_superpages = nr_pages / lvl_pages;
- end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
-
- /*
- * Ensure that old small page tables are
- * removed to make room for superpage(s).
- * We're adding new large pages, so make sure
- * we don't remove their parent tables.
- */
- dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
- largepage_lvl + 1);
+ end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
+ switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
} else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
@@ -2422,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
+
+ if (sm_supported(iommu))
+ qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
+
iommu->flush.flush_iotlb(iommu,
did_old,
0,
@@ -2505,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ flags |= PASID_FLAG_PAGE_SNOOP;
+
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
domain->iommu_did[iommu->seq_id],
flags);
@@ -3267,8 +3310,6 @@ static int __init init_dmars(void)
register_pasid_allocator(iommu);
#endif
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3458,12 +3499,7 @@ static int init_iommu_hw(void)
}
iommu_flush_write_buffer(iommu);
-
iommu_set_root_entry(iommu);
-
- iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}
@@ -3846,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
goto disable_iommu;
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
@@ -4065,35 +4099,6 @@ static struct notifier_block intel_iommu_memory_nb = {
.priority = 0
};
-static void free_all_cpu_cached_iovas(unsigned int cpu)
-{
- int i;
-
- for (i = 0; i < g_num_of_iommus; i++) {
- struct intel_iommu *iommu = g_iommus[i];
- struct dmar_domain *domain;
- int did;
-
- if (!iommu)
- continue;
-
- for (did = 0; did < cap_ndoms(iommu->cap); did++) {
- domain = get_iommu_domain(iommu, (u16)did);
-
- if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
- continue;
-
- iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain);
- }
- }
-}
-
-static int intel_iommu_cpu_dead(unsigned int cpu)
-{
- free_all_cpu_cached_iovas(cpu);
- return 0;
-}
-
static void intel_disable_iommus(void)
{
struct intel_iommu *iommu = NULL;
@@ -4377,19 +4382,28 @@ int __init intel_iommu_init(void)
down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
+ /*
+ * The flush queue implementation does not perform
+ * page-selective invalidations that are required for efficient
+ * TLB flushes in virtual environments. The benefit of batching
+ * is likely to be much lower than the overhead of synchronizing
+ * the virtual and physical IOMMU page-tables.
+ */
+ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
+ pr_warn("IOMMU batching is disabled due to virtualization");
+ intel_iommu_strict = 1;
+ }
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
- iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
- iommu_device_register(&iommu->iommu);
+ iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
}
up_read(&dmar_global_lock);
+ iommu_set_dma_strict(intel_iommu_strict);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
- intel_iommu_cpu_dead);
down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
@@ -5343,6 +5357,8 @@ static int siov_find_pci_dvsec(struct pci_dev *pdev)
static bool
intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
{
+ struct device_domain_info *info = get_domain_info(dev);
+
if (feat == IOMMU_DEV_FEAT_AUX) {
int ret;
@@ -5357,13 +5373,13 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
return !!siov_find_pci_dvsec(to_pci_dev(dev));
}
- if (feat == IOMMU_DEV_FEAT_SVA) {
- struct device_domain_info *info = get_domain_info(dev);
+ if (feat == IOMMU_DEV_FEAT_IOPF)
+ return info && info->pri_supported;
+ if (feat == IOMMU_DEV_FEAT_SVA)
return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
info->pasid_supported && info->pri_supported &&
info->ats_supported;
- }
return false;
}
@@ -5374,12 +5390,18 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
if (feat == IOMMU_DEV_FEAT_AUX)
return intel_iommu_enable_auxd(dev);
+ if (feat == IOMMU_DEV_FEAT_IOPF)
+ return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
+
if (feat == IOMMU_DEV_FEAT_SVA) {
struct device_domain_info *info = get_domain_info(dev);
if (!info)
return -EINVAL;
+ if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+ return -EINVAL;
+
if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
return 0;
}
@@ -5423,87 +5445,23 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
}
static int
-intel_iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+intel_iommu_enable_nesting(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long flags;
- int ret = 0;
-
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
+ int ret = -ENODEV;
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- spin_lock_irqsave(&device_domain_lock, flags);
- if (nested_mode_support() &&
- list_empty(&dmar_domain->devices)) {
- dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
- dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
- break;
- default:
- ret = -EINVAL;
- break;
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
+ dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
+ dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
+ ret = 0;
}
+ spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
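
With the attribute interface gone, nesting is requested through this dedicated op, and only before any device is attached to the domain. A hedged caller sketch, assuming the core's iommu_enable_nesting() wrapper that dispatches to this op:

    if (iommu_enable_nesting(domain))
    	pr_info("nested translation not available for this domain\n");
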
-static bool domain_use_flush_queue(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool r = true;
-
- if (intel_iommu_strict)
- return false;
-
- /*
- * The flush queue implementation does not perform page-selective
- * invalidations that are required for efficient TLB flushes in virtual
- * environments. The benefit of batching is likely to be much lower than
- * the overhead of synchronizing the virtual and physical IOMMU
- * page-tables.
- */
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!cap_caching_mode(iommu->cap))
- continue;
-
- pr_warn_once("IOMMU batching is disabled due to virtualization");
- r = false;
- break;
- }
- rcu_read_unlock();
-
- return r;
-}
-
-static int
-intel_iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
-{
- switch (domain->type) {
- case IOMMU_DOMAIN_UNMANAGED:
- return -ENODEV;
- case IOMMU_DOMAIN_DMA:
- switch (attr) {
- case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = domain_use_flush_queue();
- return 0;
- default:
- return -ENODEV;
- }
- break;
- default:
- return -EINVAL;
- }
-}
-
/*
* Check that the device does not live on an external facing PCI port that is
* marked as untrusted. Such devices should not be able to apply quirks and
@@ -5576,8 +5534,7 @@ const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
- .domain_get_attr = intel_iommu_domain_get_attr,
- .domain_set_attr = intel_iommu_domain_set_attr,
+ .enable_nesting = intel_iommu_enable_nesting,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.aux_attach_dev = intel_iommu_aux_attach_device,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 611ef5243cb6..f912fe45bea2 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
return -ENODEV;
if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
- goto error;
+ return -ENODEV;
if (!dmar_ir_support())
return -ENODEV;
@@ -1280,7 +1280,8 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
break;
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
- set_msi_sid(irte, msi_desc_to_pci_dev(info->desc));
+ set_msi_sid(irte,
+ pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
break;
default:
BUG_ON(1);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index f26cb6195b2c..72646bafc52f 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -24,7 +24,6 @@
/*
* Intel IOMMU system wide PASID name space:
*/
-static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
@@ -231,7 +230,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
return info->pasid_table;
}
-int intel_pasid_get_dev_max_id(struct device *dev)
+static int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
@@ -242,7 +241,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
return info->pasid_table->max_pasid;
}
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
+static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
@@ -259,19 +258,25 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
- spin_lock(&pasid_lock);
+retry:
entries = get_pasid_table_from_pde(&dir[dir_index]);
if (!entries) {
entries = alloc_pgtable_page(info->iommu->node);
- if (!entries) {
- spin_unlock(&pasid_lock);
+ if (!entries)
return NULL;
- }
- WRITE_ONCE(dir[dir_index].val,
- (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
+ /*
+ * The pasid directory table entry won't be freed after
+ * allocation. No worry about the race with free and
+ * clear. However, this entry might be populated by others
+ * while we are preparing it. Use theirs with a retry.
+ */
+ if (cmpxchg64(&dir[dir_index].val, 0ULL,
+ (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
+ free_pgtable_page(entries);
+ goto retry;
+ }
}
- spin_unlock(&pasid_lock);
return &entries[index];
}
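
The per-table spinlock is replaced above by a lockless publish: allocate a page, try to install it with cmpxchg64(), and on losing the race free ours and retry with the winner's entry. A generic sketch of the pattern under stated assumptions (alloc_tbl() and free_tbl() are stand-ins, not kernel APIs; bit 0 models the present flag):

    static void *get_or_install(u64 *slot)
    {
    	void *new;
    	u64 old;

    	for (;;) {
    		old = READ_ONCE(*slot);
    		if (old)		/* already published by someone */
    			return phys_to_virt(old & PAGE_MASK);
    		new = alloc_tbl();	/* stand-in page allocator */
    		if (!new)
    			return NULL;
    		if (cmpxchg64(slot, 0ULL, virt_to_phys(new) | 1) == 0ULL)
    			return new;	/* we won the race */
    		free_tbl(new);		/* raced; retry with theirs */
    	}
    }
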
@@ -394,6 +399,15 @@ static inline void pasid_set_sre(struct pasid_entry *pe)
}
/*
+ * Setup the WPE(Write Protect Enable) field (Bit 132) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_wpe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
+}
+
+/*
* Setup the P(Present) field (Bit 0) of a scalable mode PASID
* entry.
*/
@@ -412,6 +426,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
}
/*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_pgsnp(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
+}
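
These helpers address the 512-bit scalable-mode PASID entry as eight 64-bit words, so architectural bit N lands in val[N / 64] at mask 1ULL << (N % 64). Worked examples for the two new fields:

    /* WPE,   architectural bit 132 -> val[2], bit 4  (132 - 128) */
    /* PGSNP, architectural bit  88 -> val[1], bit 24 ( 88 -  64) */
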
+
+/*
* Setup the First Level Page table Pointer field (Bit 140~191)
* of a scalable mode PASID entry.
*/
@@ -493,6 +517,9 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
if (WARN_ON(!pte))
return;
+ if (!(pte->val[0] & PASID_PTE_PRESENT))
+ return;
+
did = pasid_get_domain_id(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
@@ -522,6 +549,22 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
+static inline int pasid_enable_wpe(struct pasid_entry *pte)
+{
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err_ratelimited("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ pasid_set_wpe(pte);
+
+ return 0;
+}
+
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -553,6 +596,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
pasid_set_sre(pte);
+ if (pasid_enable_wpe(pte))
+ return -EINVAL;
+
}
if (flags & PASID_FLAG_FL5LP) {
@@ -565,6 +611,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
}
+ if (flags & PASID_FLAG_PAGE_SNOOP)
+ pasid_set_pgsnp(pte);
+
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
@@ -643,6 +692,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+ pasid_set_pgsnp(pte);
+
/*
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
@@ -706,6 +758,9 @@ intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
return -EINVAL;
}
pasid_set_sre(pte);
+ /* Enable write protect WP if guest requested */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
+ pasid_set_wpe(pte);
}
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 444c0bec221a..5ff61c3d401f 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -48,6 +48,7 @@
*/
#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
#define PASID_FLAG_NESTED BIT(1)
+#define PASID_FLAG_PAGE_SNOOP BIT(2)
/*
* The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
@@ -99,14 +100,9 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
}
extern unsigned int intel_pasid_max_id;
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
-void intel_pasid_free_id(u32 pasid);
-void *intel_pasid_lookup_id(u32 pasid);
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
-int intel_pasid_get_dev_max_id(struct device *dev);
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
u32 pasid, u16 did, int flags);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 574a7e657a9a..5165cea90421 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -462,13 +462,12 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
/* Caller must hold pasid_mutex, mm reference */
static int
intel_svm_bind_mm(struct device *dev, unsigned int flags,
- struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct intel_svm *svm = NULL, *t;
struct device_domain_info *info;
struct intel_svm_dev *sdev;
- struct intel_svm *svm = NULL;
unsigned long iflags;
int pasid_max;
int ret;
@@ -494,34 +493,26 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
- struct intel_svm *t;
-
- list_for_each_entry(t, &global_svm_list, list) {
- if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
- continue;
-
- svm = t;
- if (svm->pasid >= pasid_max) {
- dev_warn(dev,
- "Limited PASID width. Cannot use existing PASID %d\n",
- svm->pasid);
- ret = -ENOSPC;
- goto out;
- }
+ list_for_each_entry(t, &global_svm_list, list) {
+ if (t->mm != mm)
+ continue;
- /* Find the matching device in svm list */
- for_each_svm_dev(sdev, svm, dev) {
- if (sdev->ops != ops) {
- ret = -EBUSY;
- goto out;
- }
- sdev->users++;
- goto success;
- }
+ svm = t;
+ if (svm->pasid >= pasid_max) {
+ dev_warn(dev,
+ "Limited PASID width. Cannot use existing PASID %d\n",
+ svm->pasid);
+ ret = -ENOSPC;
+ goto out;
+ }
- break;
+ /* Find the matching device in svm list */
+ for_each_svm_dev(sdev, svm, dev) {
+ sdev->users++;
+ goto success;
}
+
+ break;
}
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
@@ -550,7 +541,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
/* Finish the setup now we know we're keeping it */
sdev->users = 1;
- sdev->ops = ops;
init_rcu_head(&sdev->rcu);
if (!svm) {
@@ -862,7 +852,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
/* Fill in event data for device specific processing */
memset(&event, 0, sizeof(struct iommu_fault_event));
event.fault.type = IOMMU_FAULT_PAGE_REQ;
- event.fault.prm.addr = desc->addr;
+ event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
event.fault.prm.pasid = desc->pasid;
event.fault.prm.grpid = desc->prg_index;
event.fault.prm.perm = prq_to_iommu_prot(desc);
@@ -895,6 +885,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
+ unsigned int flags = 0;
/* Clear PPR bit before reading head/tail registers, to
* ensure that we get a new interrupt if needed. */
@@ -920,7 +911,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
((unsigned long long *)req)[1]);
goto no_pasid;
}
-
+ /* We shall not receive page request for supervisor SVM */
+ if (req->pm_req && (req->rd_req | req->wr_req)) {
+ pr_err("Unexpected page request in Privilege Mode");
+ /* No need to find the matching sdev as for bad_req */
+ goto no_pasid;
+ }
+ /* DMA read with exec request is not supported. */
+ if (req->exe_req && req->rd_req) {
+ pr_err("Execution request not supported\n");
+ goto no_pasid;
+ }
if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
svm = ioasid_find(NULL, req->pasid, NULL);
@@ -982,9 +983,11 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (access_error(vma, req))
goto invalid;
- ret = handle_mm_fault(vma, address,
- req->wr_req ? FAULT_FLAG_WRITE : 0,
- NULL);
+ flags = FAULT_FLAG_USER | FAULT_FLAG_REMOTE;
+ if (req->wr_req)
+ flags |= FAULT_FLAG_WRITE;
+
+ ret = handle_mm_fault(vma, address, flags, NULL);
if (ret & VM_FAULT_ERROR)
goto invalid;
@@ -993,13 +996,6 @@ invalid:
mmap_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
- WARN_ON(!sdev);
- if (sdev && sdev->ops && sdev->ops->fault_cb) {
- int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
- (req->exe_req << 1) | (req->pm_req);
- sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
- req->priv_data, rwxp, result);
- }
/* We get here in the error case where the PASID lookup failed,
and these can be NULL. Do not use them below this point! */
sdev = NULL;
@@ -1021,12 +1017,12 @@ no_pasid:
QI_PGRP_RESP_TYPE;
resp.qw1 = QI_PGRP_IDX(req->prg_index) |
QI_PGRP_LPIG(req->lpig);
+ resp.qw2 = 0;
+ resp.qw3 = 0;
if (req->priv_data_present)
memcpy(&resp.qw2, req->priv_data,
sizeof(req->priv_data));
- resp.qw2 = 0;
- resp.qw3 = 0;
qi_submit_sync(iommu, &resp, 1, 0);
}
prq_advance:
@@ -1074,7 +1070,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
if (drvdata)
flags = *(unsigned int *)drvdata;
mutex_lock(&pasid_mutex);
- ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
+ ret = intel_svm_bind_mm(dev, flags, mm, &sdev);
if (ret)
sva = ERR_PTR(ret);
else if (sdev)
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
new file mode 100644
index 000000000000..1df8c1dcae77
--- /dev/null
+++ b/drivers/iommu/io-pgfault.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handle device page faults
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "iommu-sva-lib.h"
+
+/**
+ * struct iopf_queue - IO Page Fault queue
+ * @wq: the fault workqueue
+ * @devices: devices attached to this queue
+ * @lock: protects the device list
+ */
+struct iopf_queue {
+ struct workqueue_struct *wq;
+ struct list_head devices;
+ struct mutex lock;
+};
+
+/**
+ * struct iopf_device_param - IO Page Fault data attached to a device
+ * @dev: the device that owns this param
+ * @queue: IOPF queue
+ * @queue_list: index into queue->devices
+ * @partial: faults that are part of a Page Request Group for which the last
+ * request hasn't been submitted yet.
+ */
+struct iopf_device_param {
+ struct device *dev;
+ struct iopf_queue *queue;
+ struct list_head queue_list;
+ struct list_head partial;
+};
+
+struct iopf_fault {
+ struct iommu_fault fault;
+ struct list_head list;
+};
+
+struct iopf_group {
+ struct iopf_fault last_fault;
+ struct list_head faults;
+ struct work_struct work;
+ struct device *dev;
+};
+
+static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
+ enum iommu_page_response_code status)
+{
+ struct iommu_page_response resp = {
+ .version = IOMMU_PAGE_RESP_VERSION_1,
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = status,
+ };
+
+ if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
+ (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
+ resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+
+ return iommu_page_response(dev, &resp);
+}
+
+static enum iommu_page_response_code
+iopf_handle_single(struct iopf_fault *iopf)
+{
+ vm_fault_t ret;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ unsigned int access_flags = 0;
+ unsigned int fault_flags = FAULT_FLAG_REMOTE;
+ struct iommu_fault_page_request *prm = &iopf->fault.prm;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
+
+ if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
+ return status;
+
+ mm = iommu_sva_find(prm->pasid);
+ if (IS_ERR_OR_NULL(mm))
+ return status;
+
+ mmap_read_lock(mm);
+
+ vma = find_extend_vma(mm, prm->addr);
+ if (!vma)
+ /* Unmapped area */
+ goto out_put_mm;
+
+ if (prm->perm & IOMMU_FAULT_PERM_READ)
+ access_flags |= VM_READ;
+
+ if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
+ access_flags |= VM_WRITE;
+ fault_flags |= FAULT_FLAG_WRITE;
+ }
+
+ if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
+ access_flags |= VM_EXEC;
+ fault_flags |= FAULT_FLAG_INSTRUCTION;
+ }
+
+ if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
+ fault_flags |= FAULT_FLAG_USER;
+
+ if (access_flags & ~vma->vm_flags)
+ /* Access fault */
+ goto out_put_mm;
+
+ ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
+ status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
+ IOMMU_PAGE_RESP_SUCCESS;
+
+out_put_mm:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return status;
+}
+
+static void iopf_handle_group(struct work_struct *work)
+{
+ struct iopf_group *group;
+ struct iopf_fault *iopf, *next;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
+
+ group = container_of(work, struct iopf_group, work);
+
+ list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ /*
+ * For the moment, errors are sticky: don't handle subsequent
+ * faults in the group if there is an error.
+ */
+ if (status == IOMMU_PAGE_RESP_SUCCESS)
+ status = iopf_handle_single(iopf);
+
+ if (!(iopf->fault.prm.flags &
+ IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+ kfree(iopf);
+ }
+
+ iopf_complete_group(group->dev, &group->last_fault, status);
+ kfree(group);
+}
+
+/**
+ * iommu_queue_iopf - IO Page Fault handler
+ * @fault: fault event
+ * @cookie: struct device, passed to iommu_register_device_fault_handler.
+ *
+ * Add a fault to the device workqueue, to be handled by the mm fault handler.
+ *
+ * This module doesn't handle PCI PASID Stop Markers; IOMMU drivers must discard
+ * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
+ * expect a response. It may be generated by some PCI devices when disabling a
+ * PASID (issuing a PASID stop request).
+ *
+ * The PASID stop request is issued by the device driver before unbind(). Once
+ * it completes, no page request is generated for this PASID anymore and
+ * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
+ * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
+ * for all outstanding page requests to come back with a response before
+ * completing the PASID stop request. Others do not wait for page responses, and
+ * instead issue this Stop Marker that tells us when the PASID can be
+ * reallocated.
+ *
+ * It is safe to discard the Stop Marker because it is an optimization.
+ * a. Page requests, which are posted requests, have been flushed to the IOMMU
+ * when the stop request completes.
+ * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
+ * PASID.
+ *
+ * So even though the Stop Marker might be issued by the device *after* the stop
+ * request completes, outstanding faults will have been dealt with by the time
+ * the PASID is freed.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+{
+ int ret;
+ struct iopf_group *group;
+ struct iopf_fault *iopf, *next;
+ struct iopf_device_param *iopf_param;
+
+ struct device *dev = cookie;
+ struct dev_iommu *param = dev->iommu;
+
+ lockdep_assert_held(&param->lock);
+
+ if (fault->type != IOMMU_FAULT_PAGE_REQ)
+ /* Not a recoverable page fault */
+ return -EOPNOTSUPP;
+
+ /*
+ * As long as we're holding param->lock, the queue can't be unlinked
+ * from the device and therefore cannot disappear.
+ */
+ iopf_param = param->iopf_param;
+ if (!iopf_param)
+ return -ENODEV;
+
+ if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+ iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
+ if (!iopf)
+ return -ENOMEM;
+
+ iopf->fault = *fault;
+
+ /* Non-last request of a group. Postpone until the last one */
+ list_add(&iopf->list, &iopf_param->partial);
+
+ return 0;
+ }
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
+ /*
+ * The caller will send a response to the hardware. But we do
+ * need to clean up before leaving, otherwise partial faults
+ * will be stuck.
+ */
+ ret = -ENOMEM;
+ goto cleanup_partial;
+ }
+
+ group->dev = dev;
+ group->last_fault.fault = *fault;
+ INIT_LIST_HEAD(&group->faults);
+ list_add(&group->last_fault.list, &group->faults);
+ INIT_WORK(&group->work, iopf_handle_group);
+
+ /* See if we have partial faults for this group */
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == fault->prm.grpid)
+ /* Insert *before* the last fault */
+ list_move(&iopf->list, &group->faults);
+ }
+
+ queue_work(iopf_param->queue->wq, &group->work);
+ return 0;
+
+cleanup_partial:
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == fault->prm.grpid) {
+ list_del(&iopf->list);
+ kfree(iopf);
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_queue_iopf);
+
+/**
+ * iopf_queue_flush_dev - Ensure that all queued faults have been processed
+ * @dev: the endpoint whose faults need to be flushed.
+ *
+ * The IOMMU driver calls this before releasing a PASID, to ensure that all
+ * pending faults for this PASID have been handled, and won't hit the address
+ * space of the next process that uses this PASID. The driver must make sure
+ * that no new fault is added to the queue. In particular it must flush its
+ * low-level queue before calling this function.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_flush_dev(struct device *dev)
+{
+ int ret = 0;
+ struct iopf_device_param *iopf_param;
+ struct dev_iommu *param = dev->iommu;
+
+ if (!param)
+ return -ENODEV;
+
+ mutex_lock(&param->lock);
+ iopf_param = param->iopf_param;
+ if (iopf_param)
+ flush_workqueue(iopf_param->queue->wq);
+ else
+ ret = -ENODEV;
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
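[A sketch of the unbind ordering the comment above describes; the driver-side helpers are hypothetical, only the two library calls are real:

	/* sketch: PASID teardown ordering (driver helpers are hypothetical) */
	driver_issue_pasid_stop(dev, pasid);	/* device stops issuing PRs */
	driver_flush_hw_prq(dev);		/* drain the low-level queue */
	iopf_queue_flush_dev(dev);		/* wait for queued work to finish */
	iommu_sva_free_pasid(mm);		/* PASID can now be reallocated */
]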
+
+/**
+ * iopf_queue_discard_partial - Remove all pending partial faults
+ * @queue: the queue whose partial faults need to be discarded
+ *
+ * When the hardware queue overflows, the last page faults of a group may have
+ * been lost and the IOMMU driver calls this to discard all partial faults. The
+ * driver shouldn't be adding new faults to this queue concurrently.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ struct iopf_fault *iopf, *next;
+ struct iopf_device_param *iopf_param;
+
+ if (!queue)
+ return -EINVAL;
+
+ mutex_lock(&queue->lock);
+ list_for_each_entry(iopf_param, &queue->devices, queue_list) {
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial,
+ list) {
+ list_del(&iopf->list);
+ kfree(iopf);
+ }
+ }
+ mutex_unlock(&queue->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
+
+/**
+ * iopf_queue_add_device - Add producer to the fault queue
+ * @queue: IOPF queue
+ * @dev: device to add
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
+{
+ int ret = -EBUSY;
+ struct iopf_device_param *iopf_param;
+ struct dev_iommu *param = dev->iommu;
+
+ if (!param)
+ return -ENODEV;
+
+ iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
+ if (!iopf_param)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&iopf_param->partial);
+ iopf_param->queue = queue;
+ iopf_param->dev = dev;
+
+ mutex_lock(&queue->lock);
+ mutex_lock(&param->lock);
+ if (!param->iopf_param) {
+ list_add(&iopf_param->queue_list, &queue->devices);
+ param->iopf_param = iopf_param;
+ ret = 0;
+ }
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
+
+ if (ret)
+ kfree(iopf_param);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_add_device);
+
+/**
+ * iopf_queue_remove_device - Remove producer from fault queue
+ * @queue: IOPF queue
+ * @dev: device to remove
+ *
+ * The caller must ensure that no more faults are reported for this device.
+ *
+ * Return: 0 on success and <0 on error.
+ */
+int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+{
+ int ret = -EINVAL;
+ struct iopf_fault *iopf, *next;
+ struct iopf_device_param *iopf_param;
+ struct dev_iommu *param = dev->iommu;
+
+ if (!param || !queue)
+ return -EINVAL;
+
+ mutex_lock(&queue->lock);
+ mutex_lock(&param->lock);
+ iopf_param = param->iopf_param;
+ if (iopf_param && iopf_param->queue == queue) {
+ list_del(&iopf_param->queue_list);
+ param->iopf_param = NULL;
+ ret = 0;
+ }
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
+ if (ret)
+ return ret;
+
+ /* Just in case some faults are still stuck */
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
+ kfree(iopf);
+
+ kfree(iopf_param);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
+
+/**
+ * iopf_queue_alloc - Allocate and initialize a fault queue
+ * @name: a unique string identifying the queue (for workqueue)
+ *
+ * Return: the queue on success and NULL on error.
+ */
+struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ struct iopf_queue *queue;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return NULL;
+
+ /*
+ * The WQ is unordered because the low-level handler enqueues faults by
+ * group. PRI requests within a group have to be ordered, but once
+ * that's dealt with, the high-level function can handle groups out of
+ * order.
+ */
+ queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
+ if (!queue->wq) {
+ kfree(queue);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&queue->devices);
+ mutex_init(&queue->lock);
+
+ return queue;
+}
+EXPORT_SYMBOL_GPL(iopf_queue_alloc);
+
+/**
+ * iopf_queue_free - Free IOPF queue
+ * @queue: queue to free
+ *
+ * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
+ * adding/removing devices on this queue anymore.
+ */
+void iopf_queue_free(struct iopf_queue *queue)
+{
+ struct iopf_device_param *iopf_param, *next;
+
+ if (!queue)
+ return;
+
+ list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
+ iopf_queue_remove_device(queue, iopf_param->dev);
+
+ destroy_workqueue(queue->wq);
+ kfree(queue);
+}
+EXPORT_SYMBOL_GPL(iopf_queue_free);
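[Putting the API together, a minimal sketch of how an IOMMU driver could hook its fault reporting into this queue. iommu_register_device_fault_handler() exists in the core; the probe/teardown placement is an assumption:

	/* one queue, typically per IOMMU instance, created at probe time */
	struct iopf_queue *q = iopf_queue_alloc(dev_name(iommu_dev));

	/* per-endpoint enablement, e.g. when IOPF is turned on for @dev */
	iopf_queue_add_device(q, dev);
	iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);

	/* teardown, in reverse order */
	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(q, dev);
	iopf_queue_free(q);
]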
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index b40990aef3fd..031155010ca8 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -12,4 +12,57 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
void iommu_sva_free_pasid(struct mm_struct *mm);
struct mm_struct *iommu_sva_find(ioasid_t pasid);
+/* I/O Page fault */
+struct device;
+struct iommu_fault;
+struct iopf_queue;
+
+#ifdef CONFIG_IOMMU_SVA_LIB
+int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
+
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_remove_device(struct iopf_queue *queue,
+ struct device *dev);
+int iopf_queue_flush_dev(struct device *dev);
+struct iopf_queue *iopf_queue_alloc(const char *name);
+void iopf_queue_free(struct iopf_queue *queue);
+int iopf_queue_discard_partial(struct iopf_queue *queue);
+
+#else /* CONFIG_IOMMU_SVA_LIB */
+static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+{
+ return -ENODEV;
+}
+
+static inline int iopf_queue_add_device(struct iopf_queue *queue,
+ struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int iopf_queue_remove_device(struct iopf_queue *queue,
+ struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int iopf_queue_flush_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void iopf_queue_free(struct iopf_queue *queue)
+{
+}
+
+static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_IOMMU_SVA_LIB */
#endif /* _IOMMU_SVA_LIB_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d0b0a15dba84..808ab70d5df5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -69,16 +69,7 @@ static const char * const iommu_group_resv_type_string[] = {
};
#define IOMMU_CMD_LINE_DMA_API BIT(0)
-
-static void iommu_set_cmd_line_dma_api(void)
-{
- iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
-}
-
-static bool iommu_cmd_line_dma_api(void)
-{
- return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
-}
+#define IOMMU_CMD_LINE_STRICT BIT(1)
static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev);
@@ -130,9 +121,7 @@ static const char *iommu_domain_type_str(unsigned int t)
static int __init iommu_subsys_init(void)
{
- bool cmd_line = iommu_cmd_line_dma_api();
-
- if (!cmd_line) {
+ if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false);
else
@@ -146,14 +135,32 @@ static int __init iommu_subsys_init(void)
pr_info("Default domain type: %s %s\n",
iommu_domain_type_str(iommu_def_domain_type),
- cmd_line ? "(set via kernel command line)" : "");
+ (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
+ "(set via kernel command line)" : "");
return 0;
}
subsys_initcall(iommu_subsys_init);
-int iommu_device_register(struct iommu_device *iommu)
+/**
+ * iommu_device_register() - Register an IOMMU hardware instance
+ * @iommu: IOMMU handle for the instance
+ * @ops: IOMMU ops to associate with the instance
+ * @hwdev: (optional) actual instance device, used for fwnode lookup
+ *
+ * Return: 0 on success, or an error.
+ */
+int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops, struct device *hwdev)
{
+ /* We need to be able to take module references appropriately */
+ if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
+ return -EINVAL;
+
+ iommu->ops = ops;
+ if (hwdev)
+ iommu->fwnode = hwdev->fwnode;
+
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
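[The WARN_ON above means a modular driver must set .owner in its ops so the core can take a module reference; a minimal registration sketch (driver names hypothetical):

	static const struct iommu_ops foo_iommu_ops = {
		/* ... callbacks ... */
		.owner = THIS_MODULE,	/* required when the ops live in a module */
	};

	ret = iommu_device_register(&foo->iommu, &foo_iommu_ops, &pdev->dev);
]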
@@ -329,10 +336,29 @@ early_param("iommu.passthrough", iommu_set_def_domain_type);
static int __init iommu_dma_setup(char *str)
{
- return kstrtobool(str, &iommu_dma_strict);
+ int ret = kstrtobool(str, &iommu_dma_strict);
+
+ if (!ret)
+ iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
+ return ret;
}
early_param("iommu.strict", iommu_dma_setup);
+void iommu_set_dma_strict(bool strict)
+{
+ if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
+ iommu_dma_strict = strict;
+}
+
+bool iommu_get_dma_strict(struct iommu_domain *domain)
+{
+ /* only allow lazy flushing for DMA domains */
+ if (domain->type == IOMMU_DOMAIN_DMA)
+ return iommu_dma_strict;
+ return true;
+}
+EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
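[Drivers are expected to consult this pair when finalising a domain; a sketch of how an io-pgtable based driver might opt into lazy invalidation, assuming the existing IO_PGTABLE_QUIRK_NON_STRICT quirk and surrounding pgtbl_cfg setup:

	/* sketch: enable lazy TLB invalidation only when the core allows it */
	if (!iommu_get_dma_strict(domain))
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
]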
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1511,14 +1537,6 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus,
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
-
- if (!iommu_dma_strict) {
- int attr = 1;
- iommu_domain_set_attr(dom,
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
- &attr);
- }
-
return 0;
}
@@ -2610,17 +2628,6 @@ size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
-int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot)
-{
- if (unlikely(domain->ops->domain_window_enable == NULL))
- return -ENODEV;
-
- return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
- prot);
-}
-EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
-
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
@@ -2675,50 +2682,26 @@ static int __init iommu_init(void)
}
core_initcall(iommu_init);
-int iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+int iommu_enable_nesting(struct iommu_domain *domain)
{
- struct iommu_domain_geometry *geometry;
- bool *paging;
- int ret = 0;
-
- switch (attr) {
- case DOMAIN_ATTR_GEOMETRY:
- geometry = data;
- *geometry = domain->geometry;
-
- break;
- case DOMAIN_ATTR_PAGING:
- paging = data;
- *paging = (domain->pgsize_bitmap != 0UL);
- break;
- default:
- if (!domain->ops->domain_get_attr)
- return -EINVAL;
-
- ret = domain->ops->domain_get_attr(domain, attr, data);
- }
-
- return ret;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+ if (!domain->ops->enable_nesting)
+ return -EINVAL;
+ return domain->ops->enable_nesting(domain);
}
-EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
+EXPORT_SYMBOL_GPL(iommu_enable_nesting);
-int iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirk)
{
- int ret = 0;
-
- switch (attr) {
- default:
- if (domain->ops->domain_set_attr == NULL)
- return -EINVAL;
-
- ret = domain->ops->domain_set_attr(domain, attr, data);
- }
-
- return ret;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+ if (!domain->ops->set_pgtable_quirks)
+ return -EINVAL;
+ return domain->ops->set_pgtable_quirks(domain, quirk);
}
-EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
+EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
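[Both replacements take the domain directly instead of an attribute/value pair; for instance, a caller that previously set an io-pgtable attribute would now do something like (assuming the existing IO_PGTABLE_QUIRK_ARM_OUTER_WBWA quirk):

	/* sketch: request outer-cacheable page-table walks for a GPU domain */
	ret = iommu_set_pgtable_quirks(domain, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
]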
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
@@ -2777,16 +2760,14 @@ EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
void iommu_set_default_passthrough(bool cmd_line)
{
if (cmd_line)
- iommu_set_cmd_line_dma_api();
-
+ iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}
void iommu_set_default_translated(bool cmd_line)
{
if (cmd_line)
- iommu_set_cmd_line_dma_api();
-
+ iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}
@@ -2878,10 +2859,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
*/
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ if (dev->iommu && dev->iommu->iommu_dev) {
+ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
- if (ops && ops->dev_enable_feat)
- return ops->dev_enable_feat(dev, feat);
+ if (ops->dev_enable_feat)
+ return ops->dev_enable_feat(dev, feat);
+ }
return -ENODEV;
}
@@ -2894,10 +2877,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ if (dev->iommu && dev->iommu->iommu_dev) {
+ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
- if (ops && ops->dev_disable_feat)
- return ops->dev_disable_feat(dev, feat);
+ if (ops->dev_disable_feat)
+ return ops->dev_disable_feat(dev, feat);
+ }
return -EBUSY;
}
@@ -2905,10 +2890,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ if (dev->iommu && dev->iommu->iommu_dev) {
+ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
- if (ops && ops->dev_feat_enabled)
- return ops->dev_feat_enabled(dev, feat);
+ if (ops->dev_feat_enabled)
+ return ops->dev_feat_enabled(dev, feat);
+ }
return false;
}
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e6e2fa85271c..b7ecd5b08039 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -22,11 +22,28 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct iova_domain *iovad;
+
+ iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+ free_cpu_cached_iovas(cpu, iovad);
+ return 0;
+}
+
static void free_global_cached_iovas(struct iova_domain *iovad);
+static struct iova *to_iova(struct rb_node *node)
+{
+ return rb_entry(node, struct iova, node);
+}
+
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn)
@@ -51,6 +68,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+ cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -136,7 +154,7 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
struct iova *cached_iova;
- cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+ cached_iova = to_iova(iovad->cached32_node);
if (free == cached_iova ||
(free->pfn_hi < iovad->dma_32bit_pfn &&
free->pfn_lo >= cached_iova->pfn_lo)) {
@@ -144,11 +162,48 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
}
- cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+ cached_iova = to_iova(iovad->cached_node);
if (free->pfn_lo >= cached_iova->pfn_lo)
iovad->cached_node = rb_next(&free->node);
}
+static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
+{
+ struct rb_node *node, *next;
+ /*
+ * Ideally what we'd like to judge here is whether limit_pfn is close
+ * enough to the highest-allocated IOVA that starting the allocation
+ * walk from the anchor node will be quicker than this initial work to
+ * find an exact starting point (especially if that ends up being the
+ * anchor node anyway). This is an incredibly crude approximation which
+ * only really helps the most likely case, but is at least trivially easy.
+ */
+ if (limit_pfn > iovad->dma_32bit_pfn)
+ return &iovad->anchor.node;
+
+ node = iovad->rbroot.rb_node;
+ while (to_iova(node)->pfn_hi < limit_pfn)
+ node = node->rb_right;
+
+search_left:
+ while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
+ node = node->rb_left;
+
+ if (!node->rb_left)
+ return node;
+
+ next = node->rb_left;
+ while (next->rb_right) {
+ next = next->rb_right;
+ if (to_iova(next)->pfn_lo >= limit_pfn) {
+ node = next;
+ goto search_left;
+ }
+ }
+
+ return node;
+}
+
/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
@@ -159,7 +214,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
new = (start) ? &start : &(root->rb_node);
/* Figure out where to put new node */
while (*new) {
- struct iova *this = rb_entry(*new, struct iova, node);
+ struct iova *this = to_iova(*new);
parent = *new;
@@ -198,7 +253,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
goto iova32_full;
curr = __get_cached_rbnode(iovad, limit_pfn);
- curr_iova = rb_entry(curr, struct iova, node);
+ curr_iova = to_iova(curr);
retry_pfn = curr_iova->pfn_hi + 1;
retry:
@@ -207,15 +262,15 @@ retry:
new_pfn = (high_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
- curr_iova = rb_entry(curr, struct iova, node);
+ curr_iova = to_iova(curr);
} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
if (high_pfn < size || new_pfn < low_pfn) {
if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
high_pfn = limit_pfn;
low_pfn = retry_pfn;
- curr = &iovad->anchor.node;
- curr_iova = rb_entry(curr, struct iova, node);
+ curr = iova_find_limit(iovad, limit_pfn);
+ curr_iova = to_iova(curr);
goto retry;
}
iovad->max32_alloc_size = size;
@@ -257,10 +312,21 @@ int iova_cache_get(void)
{
mutex_lock(&iova_cache_mutex);
if (!iova_cache_users) {
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
+ iova_cpuhp_dead);
+ if (ret) {
+ mutex_unlock(&iova_cache_mutex);
+ pr_err("Couldn't register cpuhp handler\n");
+ return ret;
+ }
+
iova_cache = kmem_cache_create(
"iommu_iova", sizeof(struct iova), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!iova_cache) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
mutex_unlock(&iova_cache_mutex);
pr_err("Couldn't create iova cache\n");
return -ENOMEM;
@@ -282,8 +348,10 @@ void iova_cache_put(void)
return;
}
iova_cache_users--;
- if (!iova_cache_users)
+ if (!iova_cache_users) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
kmem_cache_destroy(iova_cache);
+ }
mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
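[The hotplug hook follows the standard multi-instance cpuhp pattern: the state and callback are registered once, and each iova_domain adds itself as an instance. Roughly:

	/* once, when the first cache user appears */
	cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
				NULL, iova_cpuhp_dead);
	/* per domain, in init_iova_domain() */
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					 &iovad->cpuhp_dead);
	/* mirrored by _remove_instance_nocalls() and cpuhp_remove_multi_state() */
]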
@@ -331,7 +399,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
assert_spin_locked(&iovad->iova_rbtree_lock);
while (node) {
- struct iova *iova = rb_entry(node, struct iova, node);
+ struct iova *iova = to_iova(node);
if (pfn < iova->pfn_lo)
node = node->rb_left;
@@ -467,7 +535,6 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
free_iova(iovad, pfn);
}
-EXPORT_SYMBOL_GPL(free_iova_fast);
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
@@ -606,6 +673,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
struct iova *iova, *tmp;
+ cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+ &iovad->cpuhp_dead);
+
free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
@@ -617,7 +687,7 @@ static int
__is_range_overlap(struct rb_node *node,
unsigned long pfn_lo, unsigned long pfn_hi)
{
- struct iova *iova = rb_entry(node, struct iova, node);
+ struct iova *iova = to_iova(node);
if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
return 1;
@@ -685,7 +755,7 @@ reserve_iova(struct iova_domain *iovad,
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
- iova = rb_entry(node, struct iova, node);
+ iova = to_iova(node);
__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
if ((pfn_lo >= iova->pfn_lo) &&
(pfn_hi <= iova->pfn_hi))
@@ -970,7 +1040,7 @@ static void free_iova_rcaches(struct iova_domain *iovad)
/*
* free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
*/
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
struct iova_cpu_rcache *cpu_rcache;
struct iova_rcache *rcache;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index eaaec0a55cc6..aaa6a4d59057 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1076,11 +1076,7 @@ static int ipmmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
- iommu_device_set_fwnode(&mmu->iommu,
- &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&mmu->iommu);
+ ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
if (ret)
return ret;
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f0ba6a09b434..7880f307cb2d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -792,10 +792,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
goto fail;
}
- iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&iommu->iommu);
+ ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
goto fail;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 6ecc007f07cd..e06b8a0e2b56 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -17,6 +17,7 @@
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
@@ -683,18 +684,12 @@ static const struct iommu_ops mtk_iommu_ops = {
.get_resv_regions = mtk_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ .owner = THIS_MODULE,
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
u32 regval;
- int ret;
-
- ret = clk_prepare_enable(data->bclk);
- if (ret) {
- dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
- return ret;
- }
if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
@@ -760,7 +755,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
dev_name(data->dev), (void *)data)) {
writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
- clk_disable_unprepare(data->bclk);
dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
return -ENODEV;
}
@@ -898,10 +892,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
goto out_link_remove;
- iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
- iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&data->iommu);
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
if (ret)
goto out_sysfs_remove;
@@ -977,14 +968,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
void __iomem *base = data->base;
int ret;
- /* Avoid first resume to affect the default value of registers below. */
- if (!m4u_dom)
- return 0;
ret = clk_prepare_enable(data->bclk);
if (ret) {
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
return ret;
}
+
+ /*
+	 * Upon first resume, only enable the clk and return, since the values of the
+ * registers are not yet set.
+ */
+ if (!m4u_dom)
+ return 0;
+
writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
@@ -1079,16 +1075,7 @@ static struct platform_driver mtk_iommu_driver = {
.pm = &mtk_iommu_pm_ops,
}
};
+module_platform_driver(mtk_iommu_driver);
-static int __init mtk_iommu_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&mtk_iommu_driver);
- if (ret != 0)
- pr_err("Failed to register MTK IOMMU driver\n");
-
- return ret;
-}
-
-subsys_initcall(mtk_iommu_init)
+MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 82ddfe9170d4..5915d7b38211 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -20,6 +20,7 @@
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
@@ -423,23 +424,21 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args iommu_spec;
- struct of_phandle_iterator it;
struct mtk_iommu_data *data;
- int err;
+ int err, idx = 0;
- of_for_each_phandle(&it, err, dev->of_node, "iommus",
- "#iommu-cells", -1) {
- int count = of_phandle_iterator_args(&it, iommu_spec.args,
- MAX_PHANDLE_ARGS);
- iommu_spec.np = of_node_get(it.node);
- iommu_spec.args_count = count;
+ while (!of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells",
+ idx, &iommu_spec)) {
- mtk_iommu_create_mapping(dev, &iommu_spec);
+ err = mtk_iommu_create_mapping(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ if (err)
+ return ERR_PTR(err);
/* dev->iommu_fwspec might have changed */
fwspec = dev_iommu_fwspec_get(dev);
-
- of_node_put(iommu_spec.np);
+ idx++;
}
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
@@ -529,6 +528,7 @@ static const struct iommu_ops mtk_iommu_ops = {
.def_domain_type = mtk_iommu_def_domain_type,
.device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
+ .owner = THIS_MODULE,
};
static const struct of_device_id mtk_iommu_of_ids[] = {
@@ -547,10 +547,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct component_match *match = NULL;
- struct of_phandle_args larb_spec;
- struct of_phandle_iterator it;
void *protect;
- int larb_nr, ret, err;
+ int larb_nr, ret, i;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -578,35 +576,33 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (IS_ERR(data->bclk))
return PTR_ERR(data->bclk);
- larb_nr = 0;
- of_for_each_phandle(&it, err, dev->of_node,
- "mediatek,larbs", NULL, 0) {
+ larb_nr = of_count_phandle_with_args(dev->of_node,
+ "mediatek,larbs", NULL);
+ if (larb_nr < 0)
+ return larb_nr;
+
+ for (i = 0; i < larb_nr; i++) {
+ struct device_node *larbnode;
struct platform_device *plarbdev;
- int count = of_phandle_iterator_args(&it, larb_spec.args,
- MAX_PHANDLE_ARGS);
- if (count)
- continue;
+ larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+ if (!larbnode)
+ return -EINVAL;
- larb_spec.np = of_node_get(it.node);
- if (!of_device_is_available(larb_spec.np))
+ if (!of_device_is_available(larbnode)) {
+ of_node_put(larbnode);
continue;
+ }
- plarbdev = of_find_device_by_node(larb_spec.np);
+ plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
- plarbdev = of_platform_device_create(
- larb_spec.np, NULL,
- platform_bus_type.dev_root);
- if (!plarbdev) {
- of_node_put(larb_spec.np);
- return -EPROBE_DEFER;
- }
+ of_node_put(larbnode);
+ return -EPROBE_DEFER;
}
+ data->larb_imu[i].dev = &plarbdev->dev;
- data->larb_imu[larb_nr].dev = &plarbdev->dev;
component_match_add_release(dev, &match, release_of,
- compare_of, larb_spec.np);
- larb_nr++;
+ compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
@@ -620,16 +616,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
- iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
-
- ret = iommu_device_register(&data->iommu);
+ ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
if (ret)
- return ret;
+ goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (!iommu_present(&platform_bus_type)) {
+ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (ret)
+ goto out_dev_unreg;
+ }
+
+ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ if (ret)
+ goto out_bus_set_null;
+ return ret;
- return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+out_bus_set_null:
+ bus_set_iommu(&platform_bus_type, NULL);
+out_dev_unreg:
+ iommu_device_unregister(&data->iommu);
+out_sysfs_remove:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
static int mtk_iommu_remove(struct platform_device *pdev)
@@ -691,9 +699,7 @@ static struct platform_driver mtk_iommu_driver = {
.pm = &mtk_iommu_pm_ops,
}
};
+module_platform_driver(mtk_iommu_driver);
-static int __init m4u_init(void)
-{
- return platform_driver_register(&mtk_iommu_driver);
-}
-subsys_initcall(m4u_init);
+MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e505b9130a1c..a9d2df001149 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -210,11 +210,6 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
of_pci_iommu_init, &info);
} else {
err = of_iommu_configure_device(master_np, dev, id);
-
- fwspec = dev_iommu_fwspec_get(dev);
- if (!err && fwspec)
- of_property_read_u32(master_np, "pasid-num-bits",
- &fwspec->num_pasid_bits);
}
/*
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 71f29c0927fc..26e517eb0dd3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1235,10 +1235,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
if (err)
goto out_group;
- iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
- iommu_device_set_fwnode(&obj->iommu, &of->fwnode);
-
- err = iommu_device_register(&obj->iommu);
+ err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
if (err)
goto out_sysfs;
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index e5d86b7177de..7a2932772fdf 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1196,10 +1196,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
goto err_put_group;
- iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
-
- err = iommu_device_register(&iommu->iommu);
+ err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
if (err)
goto err_remove_sysfs;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 8895dbb705eb..6019e58ce4fb 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -333,9 +333,7 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_err;
- iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops);
-
- rc = iommu_device_register(&zdev->iommu_dev);
+ rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
if (rc)
goto out_sysfs;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
new file mode 100644
index 000000000000..73dfd9946312
--- /dev/null
+++ b/drivers/iommu/sprd-iommu.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc IOMMU driver
+ *
+ * Copyright (C) 2020 Unisoc, Inc.
+ * Author: Chunyan Zhang <chunyan.zhang@unisoc.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SPRD_IOMMU_PAGE_SHIFT 12
+#define SPRD_IOMMU_PAGE_SIZE SZ_4K
+
+#define SPRD_EX_CFG 0x0
+#define SPRD_IOMMU_VAOR_BYPASS BIT(4)
+#define SPRD_IOMMU_GATE_EN BIT(1)
+#define SPRD_IOMMU_EN BIT(0)
+#define SPRD_EX_UPDATE 0x4
+#define SPRD_EX_FIRST_VPN 0x8
+#define SPRD_EX_VPN_RANGE 0xc
+#define SPRD_EX_FIRST_PPN 0x10
+#define SPRD_EX_DEFAULT_PPN 0x14
+
+#define SPRD_IOMMU_VERSION 0x0
+#define SPRD_VERSION_MASK GENMASK(15, 8)
+#define SPRD_VERSION_SHIFT 0x8
+#define SPRD_VAU_CFG 0x4
+#define SPRD_VAU_UPDATE 0x8
+#define SPRD_VAU_AUTH_CFG 0xc
+#define SPRD_VAU_FIRST_PPN 0x10
+#define SPRD_VAU_DEFAULT_PPN_RD 0x14
+#define SPRD_VAU_DEFAULT_PPN_WR 0x18
+#define SPRD_VAU_FIRST_VPN 0x1c
+#define SPRD_VAU_VPN_RANGE 0x20
+
+enum sprd_iommu_version {
+ SPRD_IOMMU_EX,
+ SPRD_IOMMU_VAU,
+};
+
+/*
+ * struct sprd_iommu_device - high-level sprd IOMMU device representation,
+ * including hardware information, configuration and driver data
+ *
+ * @ver: sprd IOMMU IP version
+ * @prot_page_va: protect page base virtual address
+ * @prot_page_pa: protect page base physical address, to which accesses
+ * are redirected on a translation fault
+ * @base: mapped base address for accessing registers
+ * @dev: pointer to basic device structure
+ * @iommu: IOMMU core representation
+ * @group: IOMMU group
+ * @eb: gate clock which controls IOMMU access
+ */
+struct sprd_iommu_device {
+ enum sprd_iommu_version ver;
+ u32 *prot_page_va;
+ dma_addr_t prot_page_pa;
+ void __iomem *base;
+ struct device *dev;
+ struct iommu_device iommu;
+ struct iommu_group *group;
+ struct clk *eb;
+};
+
+struct sprd_iommu_domain {
+ spinlock_t pgtlock; /* lock for page table */
+ struct iommu_domain domain;
+ u32 *pgt_va; /* page table virtual address base */
+ dma_addr_t pgt_pa; /* page table physical address base */
+ struct sprd_iommu_device *sdev;
+};
+
+static const struct iommu_ops sprd_iommu_ops;
+
+static struct sprd_iommu_domain *to_sprd_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct sprd_iommu_domain, domain);
+}
+
+static inline void
+sprd_iommu_write(struct sprd_iommu_device *sdev, unsigned int reg, u32 val)
+{
+ writel_relaxed(val, sdev->base + reg);
+}
+
+static inline u32
+sprd_iommu_read(struct sprd_iommu_device *sdev, unsigned int reg)
+{
+ return readl_relaxed(sdev->base + reg);
+}
+
+static inline void
+sprd_iommu_update_bits(struct sprd_iommu_device *sdev, unsigned int reg,
+ u32 mask, u32 shift, u32 val)
+{
+ u32 t = sprd_iommu_read(sdev, reg);
+
+ t = (t & (~(mask << shift))) | ((val & mask) << shift);
+ sprd_iommu_write(sdev, reg, t);
+}
+
+static inline int
+sprd_iommu_get_version(struct sprd_iommu_device *sdev)
+{
+ int ver = (sprd_iommu_read(sdev, SPRD_IOMMU_VERSION) &
+ SPRD_VERSION_MASK) >> SPRD_VERSION_SHIFT;
+
+ switch (ver) {
+ case SPRD_IOMMU_EX:
+ case SPRD_IOMMU_VAU:
+ return ver;
+ default:
+ return -EINVAL;
+ }
+}
+
+static size_t
+sprd_iommu_pgt_size(struct iommu_domain *domain)
+{
+ return ((domain->geometry.aperture_end -
+ domain->geometry.aperture_start + 1) >>
+ SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32);
+}
+
+static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+{
+ struct sprd_iommu_domain *dom;
+
+ if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ if (iommu_get_dma_cookie(&dom->domain)) {
+ kfree(dom);
+ return NULL;
+ }
+
+ spin_lock_init(&dom->pgtlock);
+
+ dom->domain.geometry.aperture_start = 0;
+ dom->domain.geometry.aperture_end = SZ_256M - 1;
+
+ return &dom->domain;
+}
+
+static void sprd_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+
+ iommu_put_dma_cookie(domain);
+ kfree(dom);
+}
+
+static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
+{
+ struct sprd_iommu_device *sdev = dom->sdev;
+ u32 val;
+ unsigned int reg;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_FIRST_VPN;
+ else
+ reg = SPRD_VAU_FIRST_VPN;
+
+ val = dom->domain.geometry.aperture_start >> SPRD_IOMMU_PAGE_SHIFT;
+ sprd_iommu_write(sdev, reg, val);
+}
+
+static void sprd_iommu_vpn_range(struct sprd_iommu_domain *dom)
+{
+ struct sprd_iommu_device *sdev = dom->sdev;
+ u32 val;
+ unsigned int reg;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_VPN_RANGE;
+ else
+ reg = SPRD_VAU_VPN_RANGE;
+
+ val = (dom->domain.geometry.aperture_end -
+ dom->domain.geometry.aperture_start) >> SPRD_IOMMU_PAGE_SHIFT;
+ sprd_iommu_write(sdev, reg, val);
+}
+
+static void sprd_iommu_first_ppn(struct sprd_iommu_domain *dom)
+{
+ u32 val = dom->pgt_pa >> SPRD_IOMMU_PAGE_SHIFT;
+ struct sprd_iommu_device *sdev = dom->sdev;
+ unsigned int reg;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_FIRST_PPN;
+ else
+ reg = SPRD_VAU_FIRST_PPN;
+
+ sprd_iommu_write(sdev, reg, val);
+}
+
+static void sprd_iommu_default_ppn(struct sprd_iommu_device *sdev)
+{
+ u32 val = sdev->prot_page_pa >> SPRD_IOMMU_PAGE_SHIFT;
+
+ if (sdev->ver == SPRD_IOMMU_EX) {
+ sprd_iommu_write(sdev, SPRD_EX_DEFAULT_PPN, val);
+ } else if (sdev->ver == SPRD_IOMMU_VAU) {
+ sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_RD, val);
+ sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_WR, val);
+ }
+}
+
+static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
+{
+ unsigned int reg_cfg;
+ u32 mask, val;
+
+ if (sdev->ver == SPRD_IOMMU_EX)
+ reg_cfg = SPRD_EX_CFG;
+ else
+ reg_cfg = SPRD_VAU_CFG;
+
+ mask = SPRD_IOMMU_EN | SPRD_IOMMU_GATE_EN;
+ val = en ? mask : 0;
+ sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
+}
+
+static int sprd_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ size_t pgt_size = sprd_iommu_pgt_size(domain);
+
+ if (dom->sdev) {
+ pr_err("There's already a device attached to this domain.\n");
+ return -EINVAL;
+ }
+
+ dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
+ if (!dom->pgt_va)
+ return -ENOMEM;
+
+ dom->sdev = sdev;
+
+ sprd_iommu_first_ppn(dom);
+ sprd_iommu_first_vpn(dom);
+ sprd_iommu_vpn_range(dom);
+ sprd_iommu_default_ppn(sdev);
+ sprd_iommu_hw_en(sdev, true);
+
+ return 0;
+}
+
+static void sprd_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ struct sprd_iommu_device *sdev = dom->sdev;
+ size_t pgt_size = sprd_iommu_pgt_size(domain);
+
+ if (!sdev)
+ return;
+
+ dma_free_coherent(sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
+ sprd_iommu_hw_en(sdev, false);
+ dom->sdev = NULL;
+}
+
+static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ unsigned long flags;
+ unsigned int i;
+ u32 *pgt_base_iova;
+ u32 pabase = (u32)paddr;
+ unsigned long start = domain->geometry.aperture_start;
+ unsigned long end = domain->geometry.aperture_end;
+
+ if (!dom->sdev) {
+ pr_err("No sprd_iommu_device attached to the domain\n");
+ return -EINVAL;
+ }
+
+ if (iova < start || (iova + size) > (end + 1)) {
+ dev_err(dom->sdev->dev, "iova 0x%lx + size 0x%zx is out of range!\n",
+ iova, size);
+ return -EINVAL;
+ }
+
+ pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ for (i = 0; i < page_num; i++) {
+ pgt_base_iova[i] = pabase >> SPRD_IOMMU_PAGE_SHIFT;
+ pabase += SPRD_IOMMU_PAGE_SIZE;
+ }
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return 0;
+}
+
+static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *iotlb_gather)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned long flags;
+ u32 *pgt_base_iova;
+ unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ unsigned long start = domain->geometry.aperture_start;
+ unsigned long end = domain->geometry.aperture_end;
+
+ if (iova < start || (iova + size) > (end + 1))
+ return -EINVAL;
+
+ pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return 0;
+}
+
+static void sprd_iommu_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned int reg;
+
+ if (dom->sdev->ver == SPRD_IOMMU_EX)
+ reg = SPRD_EX_UPDATE;
+ else
+ reg = SPRD_VAU_UPDATE;
+
+ /* clear the IOMMU TLB buffer after the page table is updated */
+ sprd_iommu_write(dom->sdev, reg, 0xffffffff);
+}
+
+static void sprd_iommu_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ sprd_iommu_sync_map(domain, 0, 0);
+}
+
+static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+ unsigned long flags;
+ phys_addr_t pa;
+ unsigned long start = domain->geometry.aperture_start;
+ unsigned long end = domain->geometry.aperture_end;
+
+ if (WARN_ON(iova < start || iova > end))
+ return 0;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ pa = *(dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT));
+ pa = (pa << SPRD_IOMMU_PAGE_SHIFT) + ((iova - start) & (SPRD_IOMMU_PAGE_SIZE - 1));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return pa;
+}
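[A worked example of the lookup above, with made-up values: for iova 0x00201234 in a domain whose aperture starts at 0, the entry index is 0x201234 >> 12 = 0x201; if pgt_va[0x201] holds PPN 0x0009f00c, the result is:

	pa = (0x0009f00c << 12) + (0x00201234 & 0xfff) = 0x9f00c234
]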
+
+static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct sprd_iommu_device *sdev;
+
+ if (!fwspec || fwspec->ops != &sprd_iommu_ops)
+ return ERR_PTR(-ENODEV);
+
+ sdev = dev_iommu_priv_get(dev);
+
+ return &sdev->iommu;
+}
+
+static void sprd_iommu_release_device(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (!fwspec || fwspec->ops != &sprd_iommu_ops)
+ return;
+
+ iommu_fwspec_free(dev);
+}
+
+static struct iommu_group *sprd_iommu_device_group(struct device *dev)
+{
+ struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
+
+ return iommu_group_ref_get(sdev->group);
+}
+
+static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct platform_device *pdev;
+
+ if (!dev_iommu_priv_get(dev)) {
+ pdev = of_find_device_by_node(args->np);
+ dev_iommu_priv_set(dev, platform_get_drvdata(pdev));
+ platform_device_put(pdev);
+ }
+
+ return 0;
+}
+
+
+static const struct iommu_ops sprd_iommu_ops = {
+ .domain_alloc = sprd_iommu_domain_alloc,
+ .domain_free = sprd_iommu_domain_free,
+ .attach_dev = sprd_iommu_attach_device,
+ .detach_dev = sprd_iommu_detach_device,
+ .map = sprd_iommu_map,
+ .unmap = sprd_iommu_unmap,
+ .iotlb_sync_map = sprd_iommu_sync_map,
+ .iotlb_sync = sprd_iommu_sync,
+ .iova_to_phys = sprd_iommu_iova_to_phys,
+ .probe_device = sprd_iommu_probe_device,
+ .release_device = sprd_iommu_release_device,
+ .device_group = sprd_iommu_device_group,
+ .of_xlate = sprd_iommu_of_xlate,
+ .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id sprd_iommu_of_match[] = {
+ { .compatible = "sprd,iommu-v1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_iommu_of_match);
+
+/*
+ * A clock is not always required: access to some IOMMUs is controlled by a
+ * gate clock, which must be enabled before those IOMMUs can be accessed.
+ * Return 0 on success, or when no clock is found.
+ */
+static int sprd_iommu_clk_enable(struct sprd_iommu_device *sdev)
+{
+ struct clk *eb;
+
+ eb = devm_clk_get_optional(sdev->dev, NULL);
+ if (!eb)
+ return 0;
+
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+
+ sdev->eb = eb;
+ return clk_prepare_enable(eb);
+}
+
+static void sprd_iommu_clk_disable(struct sprd_iommu_device *sdev)
+{
+ if (sdev->eb)
+ clk_disable_unprepare(sdev->eb);
+}
+
+static int sprd_iommu_probe(struct platform_device *pdev)
+{
+ struct sprd_iommu_device *sdev;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret;
+
+ sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to get ioremap resource.\n");
+ return PTR_ERR(base);
+ }
+ sdev->base = base;
+
+ sdev->prot_page_va = dma_alloc_coherent(dev, SPRD_IOMMU_PAGE_SIZE,
+ &sdev->prot_page_pa, GFP_KERNEL);
+ if (!sdev->prot_page_va)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sdev);
+ sdev->dev = dev;
+
+ /* All the client devices are in the same iommu-group */
+ sdev->group = iommu_group_alloc();
+ if (IS_ERR(sdev->group)) {
+ ret = PTR_ERR(sdev->group);
+ goto free_page;
+ }
+
+ ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
+ if (ret)
+ goto put_group;
+
+ ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
+ if (ret)
+ goto remove_sysfs;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);
+
+ ret = sprd_iommu_clk_enable(sdev);
+ if (ret)
+ goto unregister_iommu;
+
+ ret = sprd_iommu_get_version(sdev);
+ if (ret < 0) {
+ dev_err(dev, "IOMMU version(%d) is invalid.\n", ret);
+ goto disable_clk;
+ }
+ sdev->ver = ret;
+
+ return 0;
+
+disable_clk:
+ sprd_iommu_clk_disable(sdev);
+unregister_iommu:
+ iommu_device_unregister(&sdev->iommu);
+remove_sysfs:
+ iommu_device_sysfs_remove(&sdev->iommu);
+put_group:
+ iommu_group_put(sdev->group);
+free_page:
+ dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
+ return ret;
+}
+
+static int sprd_iommu_remove(struct platform_device *pdev)
+{
+ struct sprd_iommu_device *sdev = platform_get_drvdata(pdev);
+
+ dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
+
+ iommu_group_put(sdev->group);
+ sdev->group = NULL;
+
+ bus_set_iommu(&platform_bus_type, NULL);
+
+ platform_set_drvdata(pdev, NULL);
+ iommu_device_sysfs_remove(&sdev->iommu);
+ iommu_device_unregister(&sdev->iommu);
+
+ return 0;
+}
+
+static struct platform_driver sprd_iommu_driver = {
+ .driver = {
+ .name = "sprd-iommu",
+ .of_match_table = sprd_iommu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = sprd_iommu_probe,
+ .remove = sprd_iommu_remove,
+};
+module_platform_driver(sprd_iommu_driver);
+
+MODULE_DESCRIPTION("IOMMU driver for Unisoc SoCs");
+MODULE_ALIAS("platform:sprd-iommu");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index ea6db1341916..181bb1c3437c 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -968,10 +968,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (ret)
goto err_free_group;
- iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
- iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
-
- ret = iommu_device_register(&iommu->iommu);
+ ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
if (ret)
goto err_remove_sysfs;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 6f130e51f072..6a358f92c7e5 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -353,10 +353,7 @@ struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
if (err)
goto free_gart;
- iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
- iommu_device_set_fwnode(&gart->iommu, dev->fwnode);
-
- err = iommu_device_register(&gart->iommu);
+ err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
if (err)
goto remove_sysfs;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 602aab98c079..1e98dc63ad13 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1145,10 +1145,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
if (err)
return ERR_PTR(err);
- iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- err = iommu_device_register(&smmu->iommu);
+ err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
if (err)
goto remove_sysfs;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 2bfdd5734844..7c02481a81b4 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -945,6 +945,7 @@ static struct iommu_ops viommu_ops = {
.get_resv_regions = viommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
+ .owner = THIS_MODULE,
};
static int viommu_init_vqs(struct viommu_dev *viommu)
@@ -1065,10 +1066,7 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
goto err_free_vqs;
- iommu_device_set_ops(&viommu->iommu, &viommu_ops);
- iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
-
- iommu_device_register(&viommu->iommu);
+ iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &viommu_ops) {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c3485b230d70..2e6923c2c8a8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -794,8 +794,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_alloc(cmd, alloc);
- /* We can only signal PTZ when alloc==1. Why do we have two bits? */
- its_encode_ptz(cmd, alloc);
+ /*
+ * GICv4.1 provides a way to get the VLPI state, which needs the vPE
+ * to be unmapped first, and in this case, we may remap the vPE
+ * back while the VPT is not empty. So we can't assume that the
+ * VPT is empty on map. This is why we never advertise PTZ.
+ */
+ its_encode_ptz(cmd, false);
its_encode_vconf_addr(cmd, vconf_addr);
its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
@@ -4554,6 +4559,15 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
its_send_vmapp(its, vpe, false);
}
+
+ /*
+ * There may be a direct read to the VPT after unmapping the
+ * vPE, to guarantee the validity of this, we make the VPT
+ * memory coherent with the CPU caches here.
+ */
+ if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
+ gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
+ LPI_PENDBASE_SZ);
}
static const struct irq_domain_ops its_vpe_domain_ops = {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b6742b4231bf..49d99cb084db 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -18,7 +18,7 @@ config LEDS_CLASS
tristate "LED Class Support"
help
This option enables the LED sysfs class in /sys/class/leds. You'll
- need this to do anything useful with LEDs. If unsure, say N.
+ need this to do anything useful with LEDs. If unsure, say Y.
config LEDS_CLASS_FLASH
tristate "LED Flash Class Support"
@@ -928,13 +928,12 @@ config LEDS_ACER_A500
This option enables support for the Power Button LED of
Acer Iconia Tab A500.
+source "drivers/leds/blink/Kconfig"
+
comment "Flash and Torch LED drivers"
source "drivers/leds/flash/Kconfig"
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
-comment "LED Blink"
-source "drivers/leds/blink/Kconfig"
-
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2a698df9da57..7e604d3028c8 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -110,4 +110,4 @@ obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/
obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
# LED Blink
-obj-$(CONFIG_LEDS_BLINK) += blink/
+obj-y += blink/
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 265b53476a80..59ba81e40e85 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,20 +1,17 @@
-menuconfig LEDS_BLINK
- bool "LED Blink support"
- depends on LEDS_CLASS
- help
- This option enables blink support for the leds class.
- If unsure, say Y.
+config LEDS_LGM
+ tristate "LED support for LGM SoC series"
+ depends on X86 || COMPILE_TEST
+ depends on GPIOLIB && LEDS_CLASS && MFD_SYSCON && OF
+ help
+ This option enables support for LEDs connected to GPIO lines on
+ Lightning Mountain (LGM) SoC. Lightning Mountain is an AnyWAN
+ gateway-on-a-chip SoC to be shipped in mid- and high-end home
+ gateways and routers.
-if LEDS_BLINK
+ These LEDs are driven by a Serial Shift Output (SSO) controller.
+ The driver supports hardware blinking and the LEDs can be configured
+ to be triggered by software/CPU or by hardware.
-config LEDS_BLINK_LGM
- tristate "LED support for Intel LGM SoC series"
- depends on LEDS_CLASS
- depends on MFD_SYSCON
- depends on OF
- help
- Parallel to serial conversion, which is also called SSO controller,
- can drive external shift register for LED outputs.
- This enables LED support for Serial Shift Output controller(SSO).
-
-endif # LEDS_BLINK
+ Say 'Y' here if you are working on an LGM SoC based platform.
+ Otherwise, say 'N'. To compile this driver as a module, choose M
+ here: the module will be called leds-lgm-sso.
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
index 2fa6c7b7b67e..fa5d04dccf13 100644
--- a/drivers/leds/blink/Makefile
+++ b/drivers/leds/blink/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_LEDS_BLINK_LGM) += leds-lgm-sso.o
+obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index 7d5c9ca007d6..6a63846d10b5 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -793,7 +793,7 @@ static int intel_sso_led_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->gclk);
if (ret) {
- dev_err(dev, "Failed to prepate/enable sso gate clock!\n");
+ dev_err(dev, "Failed to prepare/enable sso gate clock!\n");
return ret;
}
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index b580b416b9a4..3f49f3edbffb 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -2,6 +2,17 @@
if LEDS_CLASS_FLASH
+config LEDS_RT4505
+ tristate "LED support for RT4505 flashlight controller"
+ depends on I2C && OF
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ select REGMAP_I2C
+ help
+ This option enables support for the RT4505 flash LED controller.
+ RT4505 includes torch and flash functions with programmable current.
+ It is commonly used to provide fill light for the camera in mobile
+ products such as phones or tablets.
+
config LEDS_RT8515
tristate "LED support for Richtek RT8515 flash/torch LED"
depends on GPIOLIB
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index e990e257f4d7..09aee561f769 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_RT4505) += leds-rt4505.o
obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
new file mode 100644
index 000000000000..ee129ab7255d
--- /dev/null
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define RT4505_REG_RESET 0x0
+#define RT4505_REG_CONFIG 0x8
+#define RT4505_REG_ILED 0x9
+#define RT4505_REG_ENABLE 0xA
+#define RT4505_REG_FLAGS 0xB
+
+#define RT4505_RESET_MASK BIT(7)
+#define RT4505_FLASHTO_MASK GENMASK(2, 0)
+#define RT4505_ITORCH_MASK GENMASK(7, 5)
+#define RT4505_ITORCH_SHIFT 5
+#define RT4505_IFLASH_MASK GENMASK(4, 0)
+#define RT4505_ENABLE_MASK GENMASK(5, 0)
+#define RT4505_TORCH_SET (BIT(0) | BIT(4))
+#define RT4505_FLASH_SET (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define RT4505_EXT_FLASH_SET (BIT(0) | BIT(1) | BIT(4) | BIT(5))
+#define RT4505_FLASH_GET (BIT(0) | BIT(1) | BIT(4))
+#define RT4505_OVP_MASK BIT(3)
+#define RT4505_SHORT_MASK BIT(2)
+#define RT4505_OTP_MASK BIT(1)
+#define RT4505_TIMEOUT_MASK BIT(0)
+
+#define RT4505_ITORCH_MINUA 46000
+#define RT4505_ITORCH_MAXUA 375000
+#define RT4505_ITORCH_STPUA 47000
+#define RT4505_IFLASH_MINUA 93750
+#define RT4505_IFLASH_MAXUA 1500000
+#define RT4505_IFLASH_STPUA 93750
+#define RT4505_FLASHTO_MINUS 100000
+#define RT4505_FLASHTO_MAXUS 800000
+#define RT4505_FLASHTO_STPUS 100000
+
+struct rt4505_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct led_classdev_flash flash;
+ struct v4l2_flash *v4l2_flash;
+};
+
+static int rt4505_torch_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct rt4505_priv *priv =
+ container_of(lcdev, struct rt4505_priv, flash.led_cdev);
+ u32 val = 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ if (level != LED_OFF) {
+ ret = regmap_update_bits(priv->regmap,
+ RT4505_REG_ILED, RT4505_ITORCH_MASK,
+ (level - 1) << RT4505_ITORCH_SHIFT);
+ if (ret)
+ goto unlock;
+
+ val = RT4505_TORCH_SET;
+ }
+
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
+ RT4505_ENABLE_MASK, val);
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static enum led_brightness rt4505_torch_brightness_get(
+ struct led_classdev *lcdev)
+{
+ struct rt4505_priv *priv =
+ container_of(lcdev, struct rt4505_priv, flash.led_cdev);
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_read(priv->regmap, RT4505_REG_ENABLE, &val);
+ if (ret) {
+ dev_err(lcdev->dev, "Failed to get LED enable\n");
+ ret = LED_OFF;
+ goto unlock;
+ }
+
+ if ((val & RT4505_ENABLE_MASK) != RT4505_TORCH_SET) {
+ ret = LED_OFF;
+ goto unlock;
+ }
+
+ ret = regmap_read(priv->regmap, RT4505_REG_ILED, &val);
+ if (ret) {
+ dev_err(lcdev->dev, "Failed to get LED brightness\n");
+ ret = LED_OFF;
+ goto unlock;
+ }
+
+ ret = ((val & RT4505_ITORCH_MASK) >> RT4505_ITORCH_SHIFT) + 1;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int rt4505_flash_brightness_set(struct led_classdev_flash *fled_cdev,
+ u32 brightness)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ struct led_flash_setting *s = &fled_cdev->brightness;
+ u32 val = (brightness - s->min) / s->step;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ILED,
+ RT4505_IFLASH_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rt4505_flash_strobe_set(struct led_classdev_flash *fled_cdev,
+ bool state)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ u32 val = state ? RT4505_FLASH_SET : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
+ RT4505_ENABLE_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rt4505_flash_strobe_get(struct led_classdev_flash *fled_cdev,
+ bool *state)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_read(priv->regmap, RT4505_REG_ENABLE, &val);
+ if (ret)
+ goto unlock;
+
+ *state = (val & RT4505_FLASH_GET) == RT4505_FLASH_GET;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int rt4505_flash_timeout_set(struct led_classdev_flash *fled_cdev,
+ u32 timeout)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ struct led_flash_setting *s = &fled_cdev->timeout;
+ u32 val = (timeout - s->min) / s->step;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_CONFIG,
+ RT4505_FLASHTO_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rt4505_fault_get(struct led_classdev_flash *fled_cdev, u32 *fault)
+{
+ struct rt4505_priv *priv =
+ container_of(fled_cdev, struct rt4505_priv, flash);
+ u32 val, led_faults = 0;
+ int ret;
+
+ ret = regmap_read(priv->regmap, RT4505_REG_FLAGS, &val);
+ if (ret)
+ return ret;
+
+ if (val & RT4505_OVP_MASK)
+ led_faults |= LED_FAULT_OVER_VOLTAGE;
+
+ if (val & RT4505_SHORT_MASK)
+ led_faults |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (val & RT4505_OTP_MASK)
+ led_faults |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (val & RT4505_TIMEOUT_MASK)
+ led_faults |= LED_FAULT_TIMEOUT;
+
+ *fault = led_faults;
+ return 0;
+}
+
+static const struct led_flash_ops rt4505_flash_ops = {
+ .flash_brightness_set = rt4505_flash_brightness_set,
+ .strobe_set = rt4505_flash_strobe_set,
+ .strobe_get = rt4505_flash_strobe_get,
+ .timeout_set = rt4505_flash_timeout_set,
+ .fault_get = rt4505_fault_get,
+};
+
+static bool rt4505_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == RT4505_REG_RESET ||
+ (reg >= RT4505_REG_CONFIG && reg <= RT4505_REG_FLAGS))
+ return true;
+ return false;
+}
+
+static const struct regmap_config rt4505_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT4505_REG_FLAGS,
+
+ .readable_reg = rt4505_is_accessible_reg,
+ .writeable_reg = rt4505_is_accessible_reg,
+};
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static int rt4505_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
+ bool enable)
+{
+ struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
+ struct rt4505_priv *priv =
+ container_of(flash, struct rt4505_priv, flash);
+ u32 val = enable ? RT4505_EXT_FLASH_SET : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
+ RT4505_ENABLE_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct v4l2_flash_ops v4l2_flash_ops = {
+ .external_strobe_set = rt4505_flash_external_strobe_set,
+};
+
+static void rt4505_init_v4l2_config(struct rt4505_priv *priv,
+ struct v4l2_flash_config *config)
+{
+ struct led_classdev_flash *flash = &priv->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(config->dev_name, lcdev->dev->kobj.name,
+ sizeof(config->dev_name));
+
+ s = &config->intensity;
+ s->min = RT4505_ITORCH_MINUA;
+ s->step = RT4505_ITORCH_STPUA;
+ s->max = s->val = s->min + (lcdev->max_brightness - 1) * s->step;
+
+ config->flash_faults = LED_FAULT_OVER_VOLTAGE |
+ LED_FAULT_SHORT_CIRCUIT |
+ LED_FAULT_LED_OVER_TEMPERATURE |
+ LED_FAULT_TIMEOUT;
+ config->has_external_strobe = 1;
+}
+#else
+static const struct v4l2_flash_ops v4l2_flash_ops;
+static void rt4505_init_v4l2_config(struct rt4505_priv *priv,
+ struct v4l2_flash_config *config)
+{
+}
+#endif
+
+static void rt4505_init_flash_properties(struct rt4505_priv *priv,
+ struct fwnode_handle *child)
+{
+ struct led_classdev_flash *flash = &priv->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ struct led_flash_setting *s;
+ u32 val;
+ int ret;
+
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &val);
+ if (ret) {
+ dev_warn(priv->dev, "led-max-microamp DT property missing\n");
+ val = RT4505_ITORCH_MINUA;
+ } else
+ val = clamp_val(val, RT4505_ITORCH_MINUA, RT4505_ITORCH_MAXUA);
+
+ lcdev->max_brightness =
+ (val - RT4505_ITORCH_MINUA) / RT4505_ITORCH_STPUA + 1;
+ lcdev->brightness_set_blocking = rt4505_torch_brightness_set;
+ lcdev->brightness_get = rt4505_torch_brightness_get;
+ lcdev->flags |= LED_DEV_CAP_FLASH;
+
+ ret = fwnode_property_read_u32(child, "flash-max-microamp", &val);
+ if (ret) {
+ dev_warn(priv->dev, "flash-max-microamp DT property missing\n");
+ val = RT4505_IFLASH_MINUA;
+ } else
+ val = clamp_val(val, RT4505_IFLASH_MINUA, RT4505_IFLASH_MAXUA);
+
+ s = &flash->brightness;
+ s->min = RT4505_IFLASH_MINUA;
+ s->step = RT4505_IFLASH_STPUA;
+ s->max = s->val = val;
+
+ ret = fwnode_property_read_u32(child, "flash-max-timeout-us", &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "flash-max-timeout-us DT property missing\n");
+ val = RT4505_FLASHTO_MINUS;
+ } else
+ val = clamp_val(val, RT4505_FLASHTO_MINUS,
+ RT4505_FLASHTO_MAXUS);
+
+ s = &flash->timeout;
+ s->min = RT4505_FLASHTO_MINUS;
+ s->step = RT4505_FLASHTO_STPUS;
+ s->max = s->val = val;
+
+ flash->ops = &rt4505_flash_ops;
+}
+
+static int rt4505_probe(struct i2c_client *client)
+{
+ struct rt4505_priv *priv;
+ struct fwnode_handle *child;
+ struct led_init_data init_data = {};
+ struct v4l2_flash_config v4l2_config = {};
+ int ret;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &client->dev;
+ mutex_init(&priv->lock);
+
+ priv->regmap = devm_regmap_init_i2c(client, &rt4505_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "Failed to allocate register map\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = regmap_write(priv->regmap, RT4505_REG_RESET, RT4505_RESET_MASK);
+ if (ret) {
+ dev_err(priv->dev, "Failed to reset registers\n");
+ return ret;
+ }
+
+ child = fwnode_get_next_available_child_node(client->dev.fwnode, NULL);
+ if (!child) {
+ dev_err(priv->dev, "Failed to get child node\n");
+ return -EINVAL;
+ }
+ init_data.fwnode = child;
+
+ rt4505_init_flash_properties(priv, child);
+ ret = devm_led_classdev_flash_register_ext(priv->dev, &priv->flash,
+ &init_data);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register flash\n");
+ return ret;
+ }
+
+ rt4505_init_v4l2_config(priv, &v4l2_config);
+ priv->v4l2_flash = v4l2_flash_init(priv->dev, init_data.fwnode,
+ &priv->flash, &v4l2_flash_ops,
+ &v4l2_config);
+ if (IS_ERR(priv->v4l2_flash)) {
+ dev_err(priv->dev, "Failed to register v4l2 flash\n");
+ return PTR_ERR(priv->v4l2_flash);
+ }
+
+ i2c_set_clientdata(client, priv);
+ return 0;
+}
+
+static int rt4505_remove(struct i2c_client *client)
+{
+ struct rt4505_priv *priv = i2c_get_clientdata(client);
+
+ v4l2_flash_release(priv->v4l2_flash);
+ return 0;
+}
+
+static void rt4505_shutdown(struct i2c_client *client)
+{
+ struct rt4505_priv *priv = i2c_get_clientdata(client);
+
+ /* Reset registers to make sure everything is off before shutdown */
+ regmap_write(priv->regmap, RT4505_REG_RESET, RT4505_RESET_MASK);
+}
+
+static const struct of_device_id __maybe_unused rt4505_leds_match[] = {
+ { .compatible = "richtek,rt4505", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt4505_leds_match);
+
+static struct i2c_driver rt4505_driver = {
+ .driver = {
+ .name = "rt4505",
+ .of_match_table = of_match_ptr(rt4505_leds_match),
+ },
+ .probe_new = rt4505_probe,
+ .remove = rt4505_remove,
+ .shutdown = rt4505_shutdown,
+};
+module_i2c_driver(rt4505_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 8007b82985a8..435309154e6b 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -339,7 +339,7 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_flash.max_brightness = 16;
chip->cdev_flash.brightness_set_blocking = lm3642_strobe_brightness_set;
chip->cdev_flash.default_trigger = "flash";
- chip->cdev_flash.groups = lm3642_flash_groups,
+ chip->cdev_flash.groups = lm3642_flash_groups;
err = led_classdev_register(&client->dev, &chip->cdev_flash);
if (err < 0) {
dev_err(chip->dev, "failed to register flash\n");
@@ -351,7 +351,7 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_torch.max_brightness = 8;
chip->cdev_torch.brightness_set_blocking = lm3642_torch_brightness_set;
chip->cdev_torch.default_trigger = "torch";
- chip->cdev_torch.groups = lm3642_torch_groups,
+ chip->cdev_torch.groups = lm3642_torch_groups;
err = led_classdev_register(&client->dev, &chip->cdev_torch);
if (err < 0) {
dev_err(chip->dev, "failed to register torch\n");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 27d027165472..017794bb87ae 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -480,6 +480,8 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
if (!pdata)
return ERR_PTR(-ENOMEM);
+ pdata->gpio_base = -1;
+
of_property_read_u8_array(np, "nxp,pwm", &pdata->pwm[0],
ARRAY_SIZE(pdata->pwm));
of_property_read_u8_array(np, "nxp,psc", &pdata->psc[0],
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 4d138d5317e9..43a265dc4696 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -333,7 +333,7 @@ static DEVICE_ATTR_RW(hw_pattern);
static umode_t pattern_trig_attrs_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (attr == &dev_attr_repeat.attr || attr == &dev_attr_pattern.attr)
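kobj_to_dev() is a typed wrapper around the container_of() idiom the old line open-coded. A self-contained sketch of the pattern, with simplified stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct kobject { int refcount; };
struct device { int id; struct kobject kobj; };

/* container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}

int main(void)
{
	struct device d = { .id = 7 };
	printf("%d\n", kobj_to_dev(&d.kobj)->id);	/* prints "7" */
	return 0;
}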
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 73e6ae88fafd..4bdd4c45e7a7 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -180,14 +180,13 @@ static struct proc_dir_entry *proc_pmu_options;
static int option_server_mode;
int pmu_battery_count;
-int pmu_cur_battery;
+static int pmu_cur_battery;
unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT;
struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
static int query_batt_timer = BATTERY_POLLING_COUNT;
static struct adb_request batt_req;
static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES];
-int __fake_sleep;
int asleep;
#ifdef CONFIG_ADB
@@ -1833,6 +1832,7 @@ pmu_present(void)
*/
static u32 save_via[8];
+static int __fake_sleep;
static void
save_via_state(void)
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 77612303841e..07f91ec1f960 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -56,7 +56,7 @@ static BLOCKING_NOTIFIER_HEAD(wf_client_list);
static int wf_client_count;
static unsigned int wf_overtemp;
static unsigned int wf_overtemp_counter;
-struct task_struct *wf_thread;
+static struct task_struct *wf_thread;
static struct platform_device wf_platform_device = {
.name = "windfarm",
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index ab467b9c31be..ba1ec6fc11d2 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -433,7 +433,7 @@ struct pm121_sys_state {
struct wf_pid_state pid;
};
-struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {};
+static struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {};
/*
* ****** CPU Fans Control Loop ******
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 79cb1ad09bfd..75966052819a 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -94,7 +94,7 @@ static int smu_set_fan(int pwm, u8 id, u16 value)
return rc;
wait_for_completion(&comp);
- /* Handle fallback (see coment above) */
+ /* Handle fallback (see comment above) */
if (cmd.status != 0 && smu_supports_new_fans_ops) {
printk(KERN_WARNING "windfarm: SMU failed new fan command "
"falling back to old method\n");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 541c45027cc8..6ab01ff25747 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3387,7 +3387,7 @@ static bool origin_dev_supports_discard(struct block_device *origin_bdev)
{
struct request_queue *q = bdev_get_queue(origin_bdev);
- return q && blk_queue_discard(q);
+ return blk_queue_discard(q);
}
/*
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 17712456fa63..c43d55672bce 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -276,12 +276,6 @@ static inline int superblock_read_lock(struct dm_clone_metadata *cmd,
return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}
-static inline int superblock_write_lock(struct dm_clone_metadata *cmd,
- struct dm_block **sblock)
-{
- return dm_bm_write_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
-}
-
static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd,
struct dm_block **sblock)
{
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 55bcfb74f51f..71475a2410be 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -28,7 +28,7 @@ struct ebs_c {
spinlock_t lock; /* Guard bios input list above. */
sector_t start; /* <start> table line argument, see ebs_ctr below. */
unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
- unsigned int u_bs; /* Underlying block size in sectors retrievd from/set on lower layer device. */
+ unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */
bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
};
@@ -43,7 +43,7 @@ static inline sector_t __block_mod(sector_t sector, unsigned int bs)
return sector & (bs - 1);
}
-/* Return number of blocks for a bio, accounting for misalignement of start and end sectors. */
+/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{
sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
@@ -171,7 +171,7 @@ static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
}
-/* Worker funtion to process incoming bios. */
+/* Worker function to process incoming bios. */
static void __ebs_process_bios(struct work_struct *ws)
{
int r;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 46b5d542b8fe..781942aeddd1 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -35,7 +35,7 @@
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
-#define RECALC_SECTORS 8192
+#define RECALC_SECTORS 32768
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
@@ -262,6 +262,7 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
+ bool reset_recalculate_flag;
bool discard;
bool fix_padding;
bool fix_hmac;
@@ -1428,8 +1429,10 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (op == TAG_READ) {
memcpy(tag, dp, to_copy);
} else if (op == TAG_WRITE) {
- memcpy(dp, tag, to_copy);
- dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+ if (memcmp(dp, tag, to_copy)) {
+ memcpy(dp, tag, to_copy);
+ dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+ }
} else {
/* e.g.: op == TAG_CMP */
@@ -2686,26 +2689,30 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
- io_req.mem.type = DM_IO_VMA;
- io_req.mem.ptr.addr = ic->recalc_buffer;
- io_req.notify.fn = NULL;
- io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
- io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = n_sectors;
+ if (!ic->discard) {
+ io_req.bi_op = REQ_OP_READ;
+ io_req.bi_op_flags = 0;
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.addr = ic->recalc_buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
- if (unlikely(r)) {
- dm_integrity_io_error(ic, "reading data", r);
- goto err;
- }
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
- t = ic->recalc_tags;
- for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
- t += ic->tag_size;
+ t = ic->recalc_tags;
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tag_size;
+ }
+ } else {
+ t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
}
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
@@ -3134,7 +3141,8 @@ static void dm_integrity_resume(struct dm_target *ti)
rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
if (ic->mode == 'B') {
- if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
+ !ic->reset_recalculate_flag) {
block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
@@ -3156,7 +3164,8 @@ static void dm_integrity_resume(struct dm_target *ti)
}
} else {
if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
- block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
+ ic->reset_recalculate_flag) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
}
@@ -3169,6 +3178,10 @@ static void dm_integrity_resume(struct dm_target *ti)
dm_integrity_io_error(ic, "writing superblock", r);
} else {
replay_journal(ic);
+ if (ic->reset_recalculate_flag) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
if (ic->mode == 'B') {
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
@@ -3242,6 +3255,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
arg_count += ic->mode == 'J';
arg_count += ic->mode == 'J';
@@ -3261,6 +3275,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate");
+ if (ic->reset_recalculate_flag)
+ DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
@@ -3914,7 +3930,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 17, "Invalid number of feature args"},
+ {0, 18, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
@@ -4039,6 +4055,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
+ goto bad;
}
ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
@@ -4058,6 +4075,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "reset_recalculate")) {
+ ic->recalculate_flag = true;
+ ic->reset_recalculate_flag = true;
} else if (!strcmp(opt_string, "allow_discards")) {
ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
@@ -4348,11 +4368,13 @@ try_smaller_buffer:
goto bad;
}
INIT_WORK(&ic->recalc_work, integrity_recalc);
- ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
- if (!ic->recalc_buffer) {
- ti->error = "Cannot allocate buffer for recalculating";
- r = -ENOMEM;
- goto bad;
+ if (!ic->discard) {
+ ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+ if (!ic->recalc_buffer) {
+ ti->error = "Cannot allocate buffer for recalculating";
+ r = -ENOMEM;
+ goto bad;
+ }
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
@@ -4361,6 +4383,9 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ if (ic->discard)
+ memset(ic->recalc_tags, DISCARD_FILLER,
+ (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4554,7 +4579,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 7, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
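The TAG_WRITE hunk above avoids dirtying a metadata buffer when the incoming tag already matches what is on disk. A minimal user-space sketch of the compare-before-write pattern (the struct and names are stand-ins, not dm-bufio's API):

#include <stdbool.h>
#include <string.h>

struct buffer { unsigned char *data; bool dirty; };

/* Only copy, and only mark the buffer dirty, when the bytes actually
 * change; unchanged tags then never force a metadata writeback. */
static void write_tag(struct buffer *b, size_t off,
		      const unsigned char *tag, size_t len)
{
	if (memcmp(b->data + off, tag, len) != 0) {
		memcpy(b->data + off, tag, len);
		b->dirty = true;
	}
}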
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1ca65b434f1f..2209cbcd84db 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/rbtree.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
@@ -36,8 +37,10 @@ struct dm_file {
* name or uuid.
*---------------------------------------------------------------*/
struct hash_cell {
- struct list_head name_list;
- struct list_head uuid_list;
+ struct rb_node name_node;
+ struct rb_node uuid_node;
+ bool name_set;
+ bool uuid_set;
char *name;
char *uuid;
@@ -53,10 +56,8 @@ struct vers_iter {
};
-#define NUM_BUCKETS 64
-#define MASK_BUCKETS (NUM_BUCKETS - 1)
-static struct list_head _name_buckets[NUM_BUCKETS];
-static struct list_head _uuid_buckets[NUM_BUCKETS];
+static struct rb_root name_rb_tree = RB_ROOT;
+static struct rb_root uuid_rb_tree = RB_ROOT;
static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
@@ -70,73 +71,110 @@ static DECLARE_RWSEM(_hash_lock);
*/
static DEFINE_MUTEX(dm_hash_cells_mutex);
-static void init_buckets(struct list_head *buckets)
-{
- unsigned int i;
-
- for (i = 0; i < NUM_BUCKETS; i++)
- INIT_LIST_HEAD(buckets + i);
-}
-
-static int dm_hash_init(void)
-{
- init_buckets(_name_buckets);
- init_buckets(_uuid_buckets);
- return 0;
-}
-
static void dm_hash_exit(void)
{
dm_hash_remove_all(false, false, false);
}
/*-----------------------------------------------------------------
- * Hash function:
- * We're not really concerned with the str hash function being
- * fast since it's only used by the ioctl interface.
- *---------------------------------------------------------------*/
-static unsigned int hash_str(const char *str)
-{
- const unsigned int hash_mult = 2654435387U;
- unsigned int h = 0;
-
- while (*str)
- h = (h + (unsigned int) *str++) * hash_mult;
-
- return h & MASK_BUCKETS;
-}
-
-/*-----------------------------------------------------------------
* Code for looking up a device by name
*---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
{
- struct hash_cell *hc;
- unsigned int h = hash_str(str);
+ struct rb_node *n = name_rb_tree.rb_node;
- list_for_each_entry (hc, _name_buckets + h, name_list)
- if (!strcmp(hc->name, str)) {
+ while (n) {
+ struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
+ int c = strcmp(hc->name, str);
+ if (!c) {
dm_get(hc->md);
return hc;
}
+ n = c >= 0 ? n->rb_left : n->rb_right;
+ }
return NULL;
}
static struct hash_cell *__get_uuid_cell(const char *str)
{
- struct hash_cell *hc;
- unsigned int h = hash_str(str);
+ struct rb_node *n = uuid_rb_tree.rb_node;
- list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
- if (!strcmp(hc->uuid, str)) {
+ while (n) {
+ struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
+ int c = strcmp(hc->uuid, str);
+ if (!c) {
dm_get(hc->md);
return hc;
}
+ n = c >= 0 ? n->rb_left : n->rb_right;
+ }
return NULL;
}
+static void __unlink_name(struct hash_cell *hc)
+{
+ if (hc->name_set) {
+ hc->name_set = false;
+ rb_erase(&hc->name_node, &name_rb_tree);
+ }
+}
+
+static void __unlink_uuid(struct hash_cell *hc)
+{
+ if (hc->uuid_set) {
+ hc->uuid_set = false;
+ rb_erase(&hc->uuid_node, &uuid_rb_tree);
+ }
+}
+
+static void __link_name(struct hash_cell *new_hc)
+{
+ struct rb_node **n, *parent;
+
+ __unlink_name(new_hc);
+
+ new_hc->name_set = true;
+
+ n = &name_rb_tree.rb_node;
+ parent = NULL;
+
+ while (*n) {
+ struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
+ int c = strcmp(hc->name, new_hc->name);
+ BUG_ON(!c);
+ parent = *n;
+ n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
+ }
+
+ rb_link_node(&new_hc->name_node, parent, n);
+ rb_insert_color(&new_hc->name_node, &name_rb_tree);
+}
+
+static void __link_uuid(struct hash_cell *new_hc)
+{
+ struct rb_node **n, *parent;
+
+ __unlink_uuid(new_hc);
+
+ new_hc->uuid_set = true;
+
+ n = &uuid_rb_tree.rb_node;
+ parent = NULL;
+
+ while (*n) {
+ struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
+ int c = strcmp(hc->uuid, new_hc->uuid);
+ BUG_ON(!c);
+ parent = *n;
+ n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
+ }
+
+ rb_link_node(&new_hc->uuid_node, parent, n);
+ rb_insert_color(&new_hc->uuid_node, &uuid_rb_tree);
+}
+
static struct hash_cell *__get_dev_cell(uint64_t dev)
{
struct mapped_device *md;
@@ -185,8 +223,7 @@ static struct hash_cell *alloc_cell(const char *name, const char *uuid,
}
}
- INIT_LIST_HEAD(&hc->name_list);
- INIT_LIST_HEAD(&hc->uuid_list);
+ hc->name_set = hc->uuid_set = false;
hc->md = md;
hc->new_map = NULL;
return hc;
@@ -226,16 +263,16 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
goto bad;
}
- list_add(&cell->name_list, _name_buckets + hash_str(name));
+ __link_name(cell);
if (uuid) {
hc = __get_uuid_cell(uuid);
if (hc) {
- list_del(&cell->name_list);
+ __unlink_name(cell);
dm_put(hc->md);
goto bad;
}
- list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
+ __link_uuid(cell);
}
dm_get(md);
mutex_lock(&dm_hash_cells_mutex);
@@ -256,9 +293,9 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
struct dm_table *table;
int srcu_idx;
- /* remove from the dev hash */
- list_del(&hc->uuid_list);
- list_del(&hc->name_list);
+ /* remove from the dev trees */
+ __unlink_name(hc);
+ __unlink_uuid(hc);
mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(hc->md, NULL);
mutex_unlock(&dm_hash_cells_mutex);
@@ -279,7 +316,8 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
- int i, dev_skipped;
+ int dev_skipped;
+ struct rb_node *n;
struct hash_cell *hc;
struct mapped_device *md;
struct dm_table *t;
@@ -289,40 +327,39 @@ retry:
down_write(&_hash_lock);
- for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_entry(hc, _name_buckets + i, name_list) {
- md = hc->md;
- dm_get(md);
+ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+ hc = container_of(n, struct hash_cell, name_node);
+ md = hc->md;
+ dm_get(md);
- if (keep_open_devices &&
- dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
- dm_put(md);
- dev_skipped++;
- continue;
- }
+ if (keep_open_devices &&
+ dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
+ dm_put(md);
+ dev_skipped++;
+ continue;
+ }
- t = __hash_remove(hc);
+ t = __hash_remove(hc);
- up_write(&_hash_lock);
+ up_write(&_hash_lock);
- if (t) {
- dm_sync_table(md);
- dm_table_destroy(t);
- }
- dm_put(md);
- if (likely(keep_open_devices))
- dm_destroy(md);
- else
- dm_destroy_immediate(md);
-
- /*
- * Some mapped devices may be using other mapped
- * devices, so repeat until we make no further
- * progress. If a new mapped device is created
- * here it will also get removed.
- */
- goto retry;
+ if (t) {
+ dm_sync_table(md);
+ dm_table_destroy(t);
}
+ dm_put(md);
+ if (likely(keep_open_devices))
+ dm_destroy(md);
+ else
+ dm_destroy_immediate(md);
+
+ /*
+ * Some mapped devices may be using other mapped
+ * devices, so repeat until we make no further
+ * progress. If a new mapped device is created
+ * here it will also get removed.
+ */
+ goto retry;
}
up_write(&_hash_lock);
@@ -340,7 +377,7 @@ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
hc->uuid = new_uuid;
mutex_unlock(&dm_hash_cells_mutex);
- list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
+ __link_uuid(hc);
}
/*
@@ -354,14 +391,14 @@ static char *__change_cell_name(struct hash_cell *hc, char *new_name)
/*
* Rename and move the name cell.
*/
- list_del(&hc->name_list);
+ __unlink_name(hc);
old_name = hc->name;
mutex_lock(&dm_hash_cells_mutex);
hc->name = new_name;
mutex_unlock(&dm_hash_cells_mutex);
- list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+ __link_name(hc);
return old_name;
}
@@ -503,9 +540,33 @@ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
return ((void *) param) + param->data_start;
}
+static bool filter_device(struct hash_cell *hc, const char *pfx_name, const char *pfx_uuid)
+{
+ const char *val;
+ size_t val_len, pfx_len;
+
+ val = hc->name;
+ val_len = strlen(val);
+ pfx_len = strnlen(pfx_name, DM_NAME_LEN);
+ if (pfx_len > val_len)
+ return false;
+ if (memcmp(val, pfx_name, pfx_len))
+ return false;
+
+ val = hc->uuid ? hc->uuid : "";
+ val_len = strlen(val);
+ pfx_len = strnlen(pfx_uuid, DM_UUID_LEN);
+ if (pfx_len > val_len)
+ return false;
+ if (memcmp(val, pfx_uuid, pfx_len))
+ return false;
+
+ return true;
+}
+
static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
- unsigned int i;
+ struct rb_node *n;
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
@@ -518,11 +579,14 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
* Loop through all the devices working out how much
* space we need.
*/
- for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_entry (hc, _name_buckets + i, name_list) {
- needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
- needed += align_val(sizeof(uint32_t));
- }
+ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+ hc = container_of(n, struct hash_cell, name_node);
+ if (!filter_device(hc, param->name, param->uuid))
+ continue;
+ needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+ needed += align_val(sizeof(uint32_t) * 2);
+ if (param->flags & DM_UUID_FLAG && hc->uuid)
+ needed += align_val(strlen(hc->uuid) + 1);
}
/*
@@ -540,21 +604,34 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
/*
* Now loop through filling out the names.
*/
- for (i = 0; i < NUM_BUCKETS; i++) {
- list_for_each_entry (hc, _name_buckets + i, name_list) {
- if (old_nl)
- old_nl->next = (uint32_t) ((void *) nl -
- (void *) old_nl);
- disk = dm_disk(hc->md);
- nl->dev = huge_encode_dev(disk_devt(disk));
- nl->next = 0;
- strcpy(nl->name, hc->name);
-
- old_nl = nl;
- event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
- *event_nr = dm_get_event_nr(hc->md);
- nl = align_ptr(event_nr + 1);
+ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+ void *uuid_ptr;
+ hc = container_of(n, struct hash_cell, name_node);
+ if (!filter_device(hc, param->name, param->uuid))
+ continue;
+ if (old_nl)
+ old_nl->next = (uint32_t) ((void *) nl -
+ (void *) old_nl);
+ disk = dm_disk(hc->md);
+ nl->dev = huge_encode_dev(disk_devt(disk));
+ nl->next = 0;
+ strcpy(nl->name, hc->name);
+
+ old_nl = nl;
+ event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
+ event_nr[0] = dm_get_event_nr(hc->md);
+ event_nr[1] = 0;
+ uuid_ptr = align_ptr(event_nr + 2);
+ if (param->flags & DM_UUID_FLAG) {
+ if (hc->uuid) {
+ event_nr[1] |= DM_NAME_LIST_FLAG_HAS_UUID;
+ strcpy(uuid_ptr, hc->uuid);
+ uuid_ptr = align_ptr(uuid_ptr + strlen(hc->uuid) + 1);
+ } else {
+ event_nr[1] |= DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID;
+ }
}
+ nl = uuid_ptr;
}
/*
* If mismatch happens, security may be compromised due to buffer
@@ -1991,14 +2068,9 @@ int __init dm_interface_init(void)
{
int r;
- r = dm_hash_init();
- if (r)
- return r;
-
r = misc_register(&_dm_misc);
if (r) {
DMERR("misc_register failed for control device");
- dm_hash_exit();
return r;
}
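The name and uuid hash tables become rb-trees ordered by strcmp(). A standalone sketch of the descent used by __get_name_cell() above, on a simplified binary tree (types and helpers are local to the sketch, not the kernel's rb_node API):

#include <stdio.h>
#include <string.h>

struct cell {
	const char *name;
	struct cell *left, *right;
};

/* Branch on the sign of strcmp(node, query), exactly as the rb_node
 * walk does: node > query -> left subtree, node < query -> right. */
static struct cell *lookup(struct cell *n, const char *name)
{
	while (n) {
		int c = strcmp(n->name, name);
		if (!c)
			return n;
		n = c > 0 ? n->left : n->right;
	}
	return NULL;
}

int main(void)
{
	struct cell a = { "alpha", NULL, NULL };
	struct cell c = { "gamma", NULL, NULL };
	struct cell b = { "beta", &a, &c };	/* "alpha" < "beta" < "gamma" */

	printf("%s\n", lookup(&b, "gamma")->name);	/* prints "gamma" */
	return 0;
}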
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index cab12b2251ba..bf4a467fc73a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1853,6 +1853,7 @@ static int rs_check_takeover(struct raid_set *rs)
((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
__within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
return 0;
+ break;
default:
break;
@@ -1868,6 +1869,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
return rs->md.new_level != rs->md.level;
}
+/* True if layout is set to reshape. */
+static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
+{
+ return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
+ rs->md.new_layout != rs->md.layout ||
+ rs->md.new_chunk_sectors != rs->md.chunk_sectors;
+}
+
/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
@@ -1880,9 +1889,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_is_raid0(rs))
return false;
- change = mddev->new_layout != mddev->layout ||
- mddev->new_chunk_sectors != mddev->chunk_sectors ||
- rs->delta_disks;
+ change = rs_is_layout_change(rs, false);
/* Historical case to support raid1 reshape without delta disks */
if (rs_is_raid1(rs)) {
@@ -2817,7 +2824,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
}
/*
- *
+ * Reshape:
* - change raid layout
* - change chunk size
* - add disks
@@ -2927,6 +2934,20 @@ static int rs_setup_reshape(struct raid_set *rs)
}
/*
+ * If the md resync thread has updated the superblock with the max reshape
+ * position at the end of a reshape, but has not (yet) reset the layout
+ * configuration changes, reset the latter here.
+ */
+static void rs_reset_inconclusive_reshape(struct raid_set *rs)
+{
+ if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
+ rs_set_cur(rs);
+ rs->md.delta_disks = 0;
+ rs->md.reshape_backwards = 0;
+ }
+}
+
+/*
* Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members.
*/
@@ -3212,11 +3233,14 @@ size_check:
if (r)
goto bad;
+ /* Catch any inconclusive reshape superblock content. */
+ rs_reset_inconclusive_reshape(rs);
+
/* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1;
rs->md.in_sync = 1;
- /* Keep array frozen */
+ /* Keep array frozen until resume. */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
@@ -3230,7 +3254,6 @@ size_check:
}
r = md_start(&rs->md);
-
if (r) {
ti->error = "Failed to start raid array";
mddev_unlock(&rs->md);
@@ -3727,15 +3750,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, chunk_size_bytes);
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
-
- /*
- * RAID0 and RAID10 personalities require bio splitting,
- * RAID1/4/5/6 don't and process large discard bios properly.
- */
- if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
- limits->discard_granularity = chunk_size_bytes;
- limits->max_discard_sectors = rs->md.chunk_sectors;
- }
}
static void raid_postsuspend(struct dm_target *ti)
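rs_is_layout_change() folds three independent reshape indicators into one predicate. A standalone sketch of the check, with a simplified config struct standing in for struct mddev:

#include <stdbool.h>

struct raid_conf {
	int delta_disks;
	int layout, new_layout;
	int chunk_sectors, new_chunk_sectors;
};

/* A reshape is pending if disks are being added or removed, or the
 * staged "new_" layout or chunk size differs from the active one. */
static bool layout_changed(const struct raid_conf *c)
{
	return c->delta_disks ||
	       c->new_layout != c->layout ||
	       c->new_chunk_sectors != c->chunk_sectors;
}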
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 13b4385f4d5a..9c3bc3711b33 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -569,6 +569,7 @@ out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
kfree(md->tag_set);
+ md->tag_set = NULL;
return err;
}
@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set);
+ md->tag_set = NULL;
}
}
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 8e329c3f3a78..9ab4bf651ca9 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -596,7 +596,7 @@ static void persistent_dtr(struct dm_exception_store *store)
free_area(ps);
/* Allocated in persistent_read_metadata */
- vfree(ps->callbacks);
+ kvfree(ps->callbacks);
kfree(ps);
}
@@ -621,8 +621,8 @@ static int persistent_read_metadata(struct dm_exception_store *store,
*/
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception);
- ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
- sizeof(*ps->callbacks));
+ ps->callbacks = kvcalloc(ps->exceptions_per_area,
+ sizeof(*ps->callbacks), GFP_KERNEL);
if (!ps->callbacks)
return -ENOMEM;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 11890db71f3f..a2acb014c13a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -663,7 +663,8 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
+ et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
+ GFP_KERNEL);
if (!et->table)
return -ENOMEM;
@@ -689,7 +690,7 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
kmem_cache_free(mem, ex);
}
- vfree(et->table);
+ kvfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e5f0f1703c5d..ee47a332b462 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -94,24 +94,6 @@ static int setup_btree_index(unsigned int l, struct dm_table *t)
return 0;
}
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
-{
- unsigned long size;
- void *addr;
-
- /*
- * Check that we're not going to overflow.
- */
- if (nmemb > (ULONG_MAX / elem_size))
- return NULL;
-
- size = nmemb * elem_size;
- addr = vzalloc(size);
-
- return addr;
-}
-EXPORT_SYMBOL(dm_vcalloc);
-
/*
* highs, and targets are managed as dynamic arrays during a
* table load.
@@ -124,15 +106,15 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
/*
* Allocate both the target array and offset array at once.
*/
- n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
- sizeof(sector_t));
+ n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
+ GFP_KERNEL);
if (!n_highs)
return -ENOMEM;
n_targets = (struct dm_target *) (n_highs + num);
memset(n_highs, -1, sizeof(*n_highs) * num);
- vfree(t->highs);
+ kvfree(t->highs);
t->num_allocated = num;
t->highs = n_highs;
@@ -198,7 +180,7 @@ void dm_table_destroy(struct dm_table *t)
/* free the indexes */
if (t->depth >= 2)
- vfree(t->index[t->depth - 2]);
+ kvfree(t->index[t->depth - 2]);
/* free the targets */
for (i = 0; i < t->num_targets; i++) {
@@ -210,7 +192,7 @@ void dm_table_destroy(struct dm_table *t)
dm_put_target_type(tgt->type);
}
- vfree(t->highs);
+ kvfree(t->highs);
/* free the device list */
free_devices(&t->devices, t->md);
@@ -1077,7 +1059,7 @@ static int setup_indexes(struct dm_table *t)
total += t->counts[i];
}
- indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
+ indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
if (!indexes)
return -ENOMEM;
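kvcalloc() subsumes the removed dm_vcalloc(): an overflow-checked multiply plus a zeroing allocation that can fall back to vmalloc. A user-space sketch of the same contract (the helper name is hypothetical):

#include <stdint.h>
#include <stdlib.h>

/* Overflow-checked zeroing array allocation: the check dm_vcalloc()
 * open-coded and kvcalloc() performs internally. */
static void *vcalloc_checked(size_t nmemb, size_t elem_size)
{
	if (elem_size && nmemb > SIZE_MAX / elem_size)
		return NULL;	/* nmemb * elem_size would wrap */
	return calloc(nmemb, elem_size);
}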
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fff4c50df74d..985baee3a678 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2816,7 +2816,7 @@ static bool data_dev_supports_discard(struct pool_c *pt)
{
struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
- return q && blk_queue_discard(q);
+ return blk_queue_discard(q);
}
static bool is_factor(sector_t block_size, uint32_t n)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 808a98ef624c..d3e76aefc1a6 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -893,6 +893,28 @@ out:
return r;
}
+static inline bool verity_is_verity_mode(const char *arg_name)
+{
+ return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
+}
+
+static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
+{
+ if (v->mode)
+ return -EINVAL;
+
+ if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
+ v->mode = DM_VERITY_MODE_LOGGING;
+ else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
+ v->mode = DM_VERITY_MODE_RESTART;
+ else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
+ v->mode = DM_VERITY_MODE_PANIC;
+
+ return 0;
+}
+
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
struct dm_verity_sig_opts *verify_args)
{
@@ -916,16 +938,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
arg_name = dm_shift_arg(as);
argc--;
- if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
- v->mode = DM_VERITY_MODE_LOGGING;
- continue;
-
- } else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
- v->mode = DM_VERITY_MODE_RESTART;
- continue;
-
- } else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC)) {
- v->mode = DM_VERITY_MODE_PANIC;
+ if (verity_is_verity_mode(arg_name)) {
+ r = verity_parse_verity_mode(v, arg_name);
+ if (r) {
+ ti->error = "Conflicting error handling parameters";
+ return r;
+ }
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
@@ -1242,7 +1260,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
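verity_parse_verity_mode() turns a duplicate error-handling argument into a hard -EINVAL instead of letting the last one silently win. A minimal sketch of the set-once parsing pattern, with names local to the sketch:

enum verity_mode { MODE_UNSET = 0, MODE_LOGGING, MODE_RESTART, MODE_PANIC };

/* Reject a second, possibly conflicting, mode argument outright. */
static int set_mode_once(enum verity_mode *mode, enum verity_mode want)
{
	if (*mode != MODE_UNSET)
		return -1;	/* "Conflicting error handling parameters" */
	*mode = want;
	return 0;
}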
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4f72b6f66c3a..aecc246ade26 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -73,7 +73,7 @@ struct wc_memory_superblock {
};
__le64 padding[8];
};
- struct wc_memory_entry entries[0];
+ struct wc_memory_entry entries[];
};
struct wc_entry {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3f3be9408afa..ca2aedd8ee7d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,7 +840,6 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
*result = &td->dm_dev;
return 0;
}
-EXPORT_SYMBOL_GPL(dm_get_table_device);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
@@ -854,7 +853,6 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
}
mutex_unlock(&md->table_devices_lock);
}
-EXPORT_SYMBOL(dm_put_table_device);
static void free_table_devices(struct list_head *devices)
{
@@ -1641,38 +1639,35 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error) {
- error = __split_and_process_non_flush(&ci);
- if (ci.sector_count && !error) {
- /*
- * Remainder must be passed to submit_bio_noacct()
- * so that it gets handled *after* bios already submitted
- * have been completely processed.
- * We take a clone of the original to store in
- * ci.io->orig_bio to be used by end_io_acct() and
- * for dec_pending to use for completion handling.
- */
- struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
- GFP_NOIO, &md->queue->bio_split);
- ci.io->orig_bio = b;
-
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
- bio_chain(b, bio);
- trace_block_split(b, bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
- break;
- }
+ error = __split_and_process_non_flush(&ci);
+ if (ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to submit_bio_noacct()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio to be used by end_io_acct() and
+ * for dec_pending to use for completion handling.
+ */
+ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+ GFP_NOIO, &md->queue->bio_split);
+ ci.io->orig_bio = b;
+
+ /*
+ * Adjust IO stats for each split, otherwise upon queue
+ * reentry there will be redundant IO accounting.
+ * NOTE: this is a stop-gap fix, a proper fix involves
+ * significant refactoring of DM core's bio splitting
+ * (by eliminating DM's splitting and just using bio_split)
+ */
+ part_stat_lock();
+ __dm_part_stat_sub(dm_disk(md)->part0,
+ sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+ part_stat_unlock();
+
+ bio_chain(b, bio);
+ trace_block_split(b, bio->bi_iter.bi_sector);
+ ret = submit_bio_noacct(bio);
}
}
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index fe073d92f01e..b1788853a355 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -34,12 +34,12 @@ struct node_header {
__le32 max_entries;
__le32 value_size;
__le32 padding;
-} __packed;
+} __attribute__((packed, aligned(8)));
struct btree_node {
struct node_header header;
__le64 keys[];
-} __packed;
+} __attribute__((packed, aligned(8)));
/*
@@ -83,7 +83,7 @@ struct shadow_spine {
};
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
-int exit_shadow_spine(struct shadow_spine *s);
+void exit_shadow_spine(struct shadow_spine *s);
int shadow_step(struct shadow_spine *s, dm_block_t b,
struct dm_btree_value_type *vt);
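Switching from plain packed to packed, aligned(8) keeps every member offset unchanged (and, because these on-disk structs are already multiples of 8 bytes, the size too), while restoring natural alignment for the object itself so the compiler can emit aligned 64-bit accesses. A standalone illustration with a stand-in header, assuming a GCC-compatible compiler:

#include <stdint.h>
#include <stdio.h>

struct hdr_packed {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
} __attribute__((packed));		/* size 16, alignof 1 */

struct hdr_packed_aligned {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
} __attribute__((packed, aligned(8)));	/* same layout, alignof 8 */

int main(void)
{
	printf("%zu %zu\n", _Alignof(struct hdr_packed),
	       _Alignof(struct hdr_packed_aligned));	/* typically "1 8" */
	return 0;
}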
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 8a2bfbfb218b..2061ab865567 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -30,8 +30,6 @@ static void node_prepare_for_write(struct dm_block_validator *v,
h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
-
- BUG_ON(node_check(v, b, 4096));
}
static int node_check(struct dm_block_validator *v,
@@ -183,15 +181,13 @@ void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
s->count = 0;
}
-int exit_shadow_spine(struct shadow_spine *s)
+void exit_shadow_spine(struct shadow_spine *s)
{
- int r = 0, i;
+ int i;
for (i = 0; i < s->count; i++) {
unlock_block(s->info, s->nodes[i]);
}
-
- return r;
}
int shadow_step(struct shadow_spine *s, dm_block_t b,
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index d8b4125e338c..a213bf11738f 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
*/
begin = do_div(index_begin, ll->entries_per_block);
end = do_div(end, ll->entries_per_block);
+ if (end == 0)
+ end = ll->entries_per_block;
for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk;
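The sm_ll_find_free_block() fix handles the wrap in the modulo: when the search end falls exactly on a block boundary, do_div() leaves a remainder of 0, which previously made the scan of the final bitmap block empty. A standalone sketch of the corrected mapping (the helper name is local to the sketch):

#include <stdint.h>

/* An exclusive end offset inside the last bitmap block: a remainder of
 * 0 means "scan the whole block", not "scan nothing". */
static uint64_t end_in_block(uint64_t end, uint64_t entries_per_block)
{
	uint64_t r = end % entries_per_block;
	return r ? r : entries_per_block;
}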
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index 8de63ce39bdd..87e17909ef52 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -33,7 +33,7 @@ struct disk_index_entry {
__le64 blocknr;
__le32 nr_free;
__le32 none_free_before;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#define MAX_METADATA_BITMAPS 255
@@ -43,7 +43,7 @@ struct disk_metadata_index {
__le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS];
-} __packed;
+} __attribute__ ((packed, aligned(8)));
struct ll_disk;
@@ -86,7 +86,7 @@ struct disk_sm_root {
__le64 nr_allocated;
__le64 bitmap_root;
__le64 ref_count_root;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#define ENTRIES_PER_BYTE 4
@@ -94,7 +94,7 @@ struct disk_bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
enum allocation_event {
SM_NONE,
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index bf4c5e2ccb6f..61f56909e00b 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -187,13 +187,8 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
static int sm_disk_commit(struct dm_space_map *sm)
{
int r;
- dm_block_t nr_free;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- r = sm_disk_get_nr_free(sm, &nr_free);
- if (r)
- return r;
-
r = sm_ll_commit(&smd->ll);
if (r)
return r;
@@ -202,10 +197,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
- r = sm_disk_get_nr_free(sm, &nr_free);
- if (r)
- return r;
-
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index f2f565281e63..a777b389a66e 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -6,11 +6,14 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -1096,6 +1099,29 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
+static inline enum dma_data_direction uvc_stream_dir(
+ struct uvc_streaming *stream)
+{
+ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return DMA_FROM_DEVICE;
+ else
+ return DMA_TO_DEVICE;
+}
+
+static inline struct device *uvc_stream_to_dmadev(struct uvc_streaming *stream)
+{
+ return bus_to_hcd(stream->dev->udev->bus)->self.sysdev;
+}
+
+static int uvc_submit_urb(struct uvc_urb *uvc_urb, gfp_t mem_flags)
+{
+ /* Sync DMA. */
+ dma_sync_sgtable_for_device(uvc_stream_to_dmadev(uvc_urb->stream),
+ uvc_urb->sgt,
+ uvc_stream_dir(uvc_urb->stream));
+ return usb_submit_urb(uvc_urb->urb, mem_flags);
+}
+
/*
* uvc_video_decode_data_work: Asynchronous memcpy processing
*
@@ -1117,7 +1143,7 @@ static void uvc_video_copy_data_work(struct work_struct *work)
uvc_queue_buffer_release(op->buf);
}
- ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
+ ret = uvc_submit_urb(uvc_urb, GFP_KERNEL);
if (ret < 0)
dev_err(&uvc_urb->stream->intf->dev,
"Failed to resubmit video URB (%d).\n", ret);
@@ -1537,6 +1563,12 @@ static void uvc_video_complete(struct urb *urb)
/* Re-initialise the URB async work. */
uvc_urb->async_operations = 0;
+ /* Sync DMA and invalidate vmap range. */
+ dma_sync_sgtable_for_cpu(uvc_stream_to_dmadev(uvc_urb->stream),
+ uvc_urb->sgt, uvc_stream_dir(stream));
+ invalidate_kernel_vmap_range(uvc_urb->buffer,
+ uvc_urb->stream->urb_size);
+
/*
* Process the URB headers, and optionally queue expensive memcpy tasks
* to be deferred to a work queue.
@@ -1545,7 +1577,7 @@ static void uvc_video_complete(struct urb *urb)
/* If no async work is needed, resubmit the URB immediately. */
if (!uvc_urb->async_operations) {
- ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ ret = uvc_submit_urb(uvc_urb, GFP_ATOMIC);
if (ret < 0)
dev_err(&stream->intf->dev,
"Failed to resubmit video URB (%d).\n", ret);
@@ -1560,24 +1592,49 @@ static void uvc_video_complete(struct urb *urb)
*/
static void uvc_free_urb_buffers(struct uvc_streaming *stream)
{
+ struct device *dma_dev = uvc_stream_to_dmadev(stream);
struct uvc_urb *uvc_urb;
for_each_uvc_urb(uvc_urb, stream) {
if (!uvc_urb->buffer)
continue;
-#ifndef CONFIG_DMA_NONCOHERENT
- usb_free_coherent(stream->dev->udev, stream->urb_size,
- uvc_urb->buffer, uvc_urb->dma);
-#else
- kfree(uvc_urb->buffer);
-#endif
+ dma_vunmap_noncontiguous(dma_dev, uvc_urb->buffer);
+ dma_free_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt,
+ uvc_stream_dir(stream));
+
uvc_urb->buffer = NULL;
+ uvc_urb->sgt = NULL;
}
stream->urb_size = 0;
}
+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+ struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+ struct device *dma_dev = uvc_stream_to_dmadev(stream);
+
+ uvc_urb->sgt = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
+ uvc_stream_dir(stream),
+ gfp_flags, 0);
+ if (!uvc_urb->sgt)
+ return false;
+ uvc_urb->dma = uvc_urb->sgt->sgl->dma_address;
+
+ uvc_urb->buffer = dma_vmap_noncontiguous(dma_dev, stream->urb_size,
+ uvc_urb->sgt);
+ if (!uvc_urb->buffer) {
+ dma_free_noncontiguous(dma_dev, stream->urb_size,
+ uvc_urb->sgt,
+ uvc_stream_dir(stream));
+ uvc_urb->sgt = NULL;
+ return false;
+ }
+
+ return true;
+}
+
/*
* Allocate transfer buffers. This function can be called with buffers
* already allocated when resuming from suspend, in which case it will
@@ -1608,19 +1665,12 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
	/* Retry allocations until one succeeds. */
for (; npackets > 1; npackets /= 2) {
+ stream->urb_size = psize * npackets;
+
for (i = 0; i < UVC_URBS; ++i) {
struct uvc_urb *uvc_urb = &stream->uvc_urb[i];
- stream->urb_size = psize * npackets;
-#ifndef CONFIG_DMA_NONCOHERENT
- uvc_urb->buffer = usb_alloc_coherent(
- stream->dev->udev, stream->urb_size,
- gfp_flags | __GFP_NOWARN, &uvc_urb->dma);
-#else
- uvc_urb->buffer =
- kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
-#endif
- if (!uvc_urb->buffer) {
+ if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) {
uvc_free_urb_buffers(stream);
break;
}
@@ -1730,12 +1780,8 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
urb->context = uvc_urb;
urb->pipe = usb_rcvisocpipe(stream->dev->udev,
ep->desc.bEndpointAddress);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#else
- urb->transfer_flags = URB_ISO_ASAP;
-#endif
urb->interval = ep->desc.bInterval;
urb->transfer_buffer = uvc_urb->buffer;
urb->complete = uvc_video_complete;
@@ -1795,10 +1841,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer,
size, uvc_video_complete, uvc_urb);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#endif
uvc_urb->urb = urb;
}
@@ -1895,7 +1939,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
/* Submit the URBs. */
for_each_uvc_urb(uvc_urb, stream) {
- ret = usb_submit_urb(uvc_urb->urb, gfp_flags);
+ ret = uvc_submit_urb(uvc_urb, gfp_flags);
if (ret < 0) {
dev_err(&stream->intf->dev,
"Failed to submit URB %u (%d).\n",
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 97df5ecd66c9..cce5e38133cd 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -219,6 +219,7 @@
*/
struct gpio_desc;
+struct sg_table;
struct uvc_device;
/* TODO: Put the most frequently accessed fields at the beginning of
@@ -545,7 +546,8 @@ struct uvc_copy_op {
* @urb: the URB described by this context structure
* @stream: UVC streaming context
* @buffer: memory storage for the URB
- * @dma: DMA coherent addressing for the urb_buffer
+ * @dma: Allocated DMA handle
+ * @sgt: sg_table with the URB locations in memory
* @async_operations: counter to indicate the number of copy operations
* @copy_operations: work descriptors for asynchronous copy operations
* @work: work queue entry for asynchronous decode
@@ -556,6 +558,7 @@ struct uvc_urb {
char *buffer;
dma_addr_t dma;
+ struct sg_table *sgt;
unsigned int async_operations;
struct uvc_copy_op copy_operations[UVC_MAX_PACKETS];
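The uvc_video.c hunks above replace usb_alloc_coherent()/kmalloc() with the dma_alloc_noncontiguous() family, which splits allocation, kernel mapping, and cache maintenance into explicit steps. A condensed sketch of that lifecycle under the same assumptions (a DMA-capable struct device and a fixed size; demo_ names are illustrative):

static void *demo_alloc_mapped(struct device *dev, size_t size,
			       struct sg_table **sgtp)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	/* CPU-side mapping; the device sees sgt->sgl->dma_address. */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
		return NULL;
	}

	*sgtp = sgt;
	return vaddr;
}

static void demo_free_mapped(struct device *dev, size_t size,
			     struct sg_table *sgt, void *vaddr)
{
	/* Unmap before freeing, as uvc_free_urb_buffers() does above. */
	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
}

/* Between the two: dma_sync_sgtable_for_device() before each submit,
 * then dma_sync_sgtable_for_cpu() plus invalidate_kernel_vmap_range()
 * in the completion path before the CPU reads through vaddr. */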
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 94843e0e51c6..bae18ef03dcb 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -385,6 +385,33 @@ static void uacce_release(struct device *dev)
kfree(uacce);
}
+static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
+{
+ if (!(flags & UACCE_DEV_SVA))
+ return flags;
+
+ flags &= ~UACCE_DEV_SVA;
+
+ if (iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF))
+ return flags;
+
+ if (iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA)) {
+ iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
+ return flags;
+ }
+
+ return flags | UACCE_DEV_SVA;
+}
+
+static void uacce_disable_sva(struct uacce_device *uacce)
+{
+ if (!(uacce->flags & UACCE_DEV_SVA))
+ return;
+
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
+}
+
/**
* uacce_alloc() - alloc an accelerator
* @parent: pointer of uacce parent device
@@ -404,11 +431,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (!uacce)
return ERR_PTR(-ENOMEM);
- if (flags & UACCE_DEV_SVA) {
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
- if (ret)
- flags &= ~UACCE_DEV_SVA;
- }
+ flags = uacce_enable_sva(parent, flags);
uacce->parent = parent;
uacce->flags = flags;
@@ -432,8 +455,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
return uacce;
err_with_uacce:
- if (flags & UACCE_DEV_SVA)
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ uacce_disable_sva(uacce);
kfree(uacce);
return ERR_PTR(ret);
}
@@ -487,8 +509,7 @@ void uacce_remove(struct uacce_device *uacce)
mutex_unlock(&uacce->queues_lock);
	/* disable sva now since there are no open queues */
- if (uacce->flags & UACCE_DEV_SVA)
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ uacce_disable_sva(uacce);
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f399edc82191..a7e3eb9befb6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1350,6 +1350,7 @@ static int bytes_str_to_int(const char *str)
fallthrough;
case 'K':
result *= 1024;
+ break;
case '\0':
break;
default:
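For context, bytes_str_to_int() parses size suffixes as a deliberate fallthrough ladder: 'G' multiplies and falls into 'M', which falls into 'K'. The added break only stops 'K' from continuing into the '\0' case. A userspace sketch of the pattern (not the UBI function itself):

#include <stdio.h>

static long long demo_bytes(long long result, char suffix)
{
	switch (suffix) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
		result *= 1024;
		break;			/* the break the patch adds */
	case '\0':
		break;
	default:
		return -1;
	}
	return result;
}

int main(void)
{
	printf("%lld\n", demo_bytes(2, 'M'));	/* 2097152 */
	return 0;
}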
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c2da77163f94..7c083ad58274 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -388,8 +388,6 @@ struct ubi_volume_desc {
int mode;
};
-struct ubi_wl_entry;
-
/**
* struct ubi_debug_info - debugging information for an UBI device.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 3e8a179f39db..c0986096c701 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8057,7 +8057,7 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
data[i + 3] = data[i + BNX2_VPD_LEN];
}
- i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto vpd_done;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 56801387591d..281b1c2e04a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12206,8 +12206,7 @@ static void bnx2x_read_fwinfo(struct bnx2x *bp)
/* VPD RO tag should be first tag after identifier string, hence
* we should be able to find it in first BNX2X_VPD_LEN chars
*/
- i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
- PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 39ac9e2f5118..2985844634c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12794,7 +12794,7 @@ static void bnxt_vpd_read_info(struct bnxt *bp)
goto exit;
}
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
netdev_err(bp->dev, "VPD READ-Only not found\n");
goto exit;
@@ -12985,8 +12985,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- bp->ulp_probe = bnxt_ulp_probe;
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 24d2ad6a8740..98e0cef4532c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1751,7 +1751,6 @@ struct bnxt {
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
struct bnxt_en_dev *edev;
- struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
struct bnxt_napi **bnapi;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 64dbbb04b043..a918e374f3c5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -491,3 +491,4 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
}
return bp->edev;
}
+EXPORT_SYMBOL(bnxt_ulp_probe);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d2381929931b..b0e49643f483 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -13016,7 +13016,7 @@ static int tg3_test_nvram(struct tg3 *tp)
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
if (i > 0) {
j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
if (j < 0)
@@ -15629,7 +15629,7 @@ static void tg3_read_vpd(struct tg3 *tp)
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 80882cfc370f..9428ef1f04a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2775,7 +2775,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (id_len > ID_LEN)
id_len = ID_LEN;
- i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
dev_err(adapter->pdev_dev, "missing VPD-R section\n");
ret = -EINVAL;
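Every pci_vpd_find_tag() call site in this series drops the old start-offset argument; the helper now always searches from the beginning of the buffer. A hedged sketch of the resulting idiom for pulling a keyword out of the read-only section (demo_ function; the constants and helpers are the existing kernel VPD API):

static int demo_find_serial(const u8 *vpd, unsigned int vpd_len)
{
	int ro_start, ro_size;

	ro_start = pci_vpd_find_tag(vpd, vpd_len, PCI_VPD_LRDT_RO_DATA);
	if (ro_start < 0)
		return ro_start;

	ro_size = pci_vpd_lrdt_size(&vpd[ro_start]);

	/* Keyword lookup keeps its explicit offset/length pair. */
	return pci_vpd_find_info_keyword(vpd,
					 ro_start + PCI_VPD_LRDT_TAG_SIZE,
					 ro_size,
					 PCI_VPD_RO_KEYWORD_SERIALNO);
}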
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 612a7f69366d..7d7ed025db0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -716,7 +716,7 @@ static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
return &mlx5i_nic_profile;
}
-static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
struct net_device *netdev, void *param)
{
struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 3f9869c7e326..96ffc0a0e670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -137,10 +137,10 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
}
ether_addr_copy(addr_mac, mac);
- MLX5_SET_RA(in_addr, roce_version, roce_version);
- MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
memcpy(addr_l3_addr, gid, gidsz);
}
+ MLX5_SET_RA(in_addr, roce_version, roce_version);
+ MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 37fb2e1fb278..dfea14399607 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -132,7 +132,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- sizeof(cdev->type)))
+ strlen(cdev->type)))
return 0;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3e86fbe21431..2c89cde7da1e 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4398,20 +4398,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
if (net_ratelimit())
netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
pci_cmd, pci_status_errs);
- /*
- * The recovery sequence below admits a very elaborated explanation:
- * - it seems to work;
- * - I did not see what else could be done;
- * - it makes iop3xx happy.
- *
- * Feel free to adjust to your needs.
- */
- if (pdev->broken_parity_status)
- pci_cmd &= ~PCI_COMMAND_PARITY;
- else
- pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
-
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 36c8625a6fd7..c746ca7235f1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -920,7 +920,7 @@ static void efx_probe_vpd_strings(struct efx_nic *efx)
}
/* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index f8979991970e..5e7a57b680ca 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2800,7 +2800,7 @@ static void ef4_probe_vpd_strings(struct ef4_nic *efx)
}
/* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7fda2ae4c40f..9b6a4a875c55 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2870,9 +2870,13 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
{
int i;
- vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
- if (!vi->ctrl)
- goto err_ctrl;
+ if (vi->has_cvq) {
+ vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+ if (!vi->ctrl)
+ goto err_ctrl;
+ } else {
+ vi->ctrl = NULL;
+ }
vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
if (!vi->sq)
goto err_sq;
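The allocation above becomes conditional on a control virtqueue being negotiated; the matching free path needs no such check because kfree(NULL) is defined as a no-op. A tiny sketch of the pattern (demo_ names, not the virtio code):

struct demo_ctx {
	bool has_cvq;
	void *ctrl;
};

static int demo_alloc_ctrl(struct demo_ctx *ctx)
{
	if (ctx->has_cvq) {
		ctx->ctrl = kzalloc(64, GFP_KERNEL);
		if (!ctx->ctrl)
			return -ENOMEM;
	} else {
		ctx->ctrl = NULL;	/* keep the field well defined */
	}
	return 0;
}

static void demo_free_ctrl(struct demo_ctx *ctx)
{
	kfree(ctx->ctrl);		/* kfree(NULL) is a safe no-op */
	ctx->ctrl = NULL;
}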
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 2a7339b12b13..398390c59344 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -146,8 +146,8 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (mvm->tz_device.tzone) {
struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
- thermal_notify_framework(tz_dev->tzone,
- tz_dev->fw_trips_index[ths_crossed]);
+ thermal_zone_device_update(tz_dev->tzone,
+ THERMAL_TRIP_VIOLATED);
}
#endif /* CONFIG_THERMAL */
}
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 0d3719407b8b..6d7d64939f82 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL_GPL(pci_pasid_features);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
/**
- * pci_max_pasid - Get maximum number of PASIDs supported by device
+ * pci_max_pasids - Get maximum number of PASIDs supported by device
* @pdev: PCI device structure
*
* Returns negative value when PASID capability is not present.
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 5aa8977d7b0f..2f2c8a1729f9 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -41,7 +41,6 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCI_MSI_ARCH_FALLBACKS
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -59,7 +58,6 @@ config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe host controller"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCI_MSI_ARCH_FALLBACKS
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -88,7 +86,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
depends on OF || COMPILE_TEST
- select PCI_MSI_ARCH_FALLBACKS
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -233,6 +231,19 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
+config PCIE_MEDIATEK_GEN3
+ tristate "MediaTek Gen3 PCIe controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+	  Adds support for the PCIe Gen3 MAC controller on MediaTek SoCs.
+	  This PCIe controller is compatible with Gen3, Gen2 and Gen1 speeds,
+	  and supports up to 256 MSI interrupt numbers for
+	  multi-function devices.
+
+ Say Y here if you want to enable Gen3 PCIe controller support on
+ MediaTek SoCs.
+
config VMD
depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index e4559f2182f2..63e3880a3e87 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -11,10 +11,13 @@ obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
+obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
+obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
@@ -27,6 +30,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
@@ -47,8 +51,10 @@ obj-y += mobiveil/
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
-ifdef CONFIG_PCI
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
obj-$(CONFIG_ARM64) += pci-thunder-pem.o
obj-$(CONFIG_ARM64) += pci-xgene.o
endif
+endif
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 849f1e416ea5..35e61048e133 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* pci-j721e - PCIe controller driver for TI's J721E SoCs
*
* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
@@ -50,6 +51,7 @@ enum link_status {
struct j721e_pcie {
struct device *dev;
+ struct clk *refclk;
u32 mode;
u32 num_lanes;
struct cdns_pcie *cdns_pcie;
@@ -312,6 +314,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct cdns_pcie_ep *ep;
struct gpio_desc *gpiod;
void __iomem *base;
+ struct clk *clk;
u32 num_lanes;
u32 mode;
int ret;
@@ -411,6 +414,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
+ clk = devm_clk_get_optional(dev, "pcie_refclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to get pcie_refclk\n");
+ goto err_pcie_setup;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pcie_refclk\n");
+ goto err_get_sync;
+ }
+ pcie->refclk = clk;
+
/*
* "Power Sequencing and Reset Signal Timings" table in
* PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
@@ -425,8 +442,10 @@ static int j721e_pcie_probe(struct platform_device *pdev)
}
ret = cdns_pcie_host_setup(rc);
- if (ret < 0)
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
goto err_pcie_setup;
+ }
break;
case PCI_MODE_EP:
@@ -479,6 +498,7 @@ static int j721e_pcie_remove(struct platform_device *pdev)
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 22c5529e9a65..423d35872ce4 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -280,7 +280,7 @@ config PCIE_TEGRA194_EP
select PCIE_TEGRA194
help
Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
- work in host mode. There are two instances of PCIe controllers in
+ work in endpoint mode. There are two instances of PCIe controllers in
Tegra194. This controller can work either as EP or RC. In order to
enable host-specific features PCIE_TEGRA194_HOST must be selected and
in order to enable device-specific features PCIE_TEGRA194_EP must be
@@ -311,6 +311,7 @@ config PCIE_AL
depends on OF && (ARM64 || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select PCI_ECAM
help
Say Y here to enable support of the Amazon's Annapurna Labs PCIe
controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
@@ -318,4 +319,13 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_FU740
+ bool "SiFive FU740 PCIe host controller"
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on SOC_SIFIVE || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
+
endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index a751553fa0db..eca805c1a023 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
@@ -17,7 +18,6 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
-obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
@@ -31,7 +31,13 @@ obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
-ifdef CONFIG_PCI
+obj-$(CONFIG_PCIE_AL) += pcie-al.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pcie-tegra194.o
+endif
endif
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 53aa35cb3a49..bde3b2824e89 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -346,8 +346,9 @@ static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
};
/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
@@ -367,6 +368,8 @@ static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
/**
* ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
@@ -449,6 +452,7 @@ static struct pci_ops ks_child_pcie_ops = {
/**
* ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+ * @bus: A pointer to the PCI bus structure.
*
* This sets BAR0 to enable inbound access for MSI_IRQ register
*/
@@ -488,6 +492,8 @@ static struct pci_ops ks_pcie_ops = {
/**
* ks_pcie_link_up() - Check if link up
+ * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
+ * controller driver information.
*/
static int ks_pcie_link_up(struct dw_pcie *pci)
{
@@ -605,7 +611,6 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
/**
* ks_pcie_legacy_irq_handler() - Handle legacy interrupt
- * @irq: IRQ line for legacy interrupts
* @desc: Pointer to irq descriptor
*
* Traverse through pending legacy interrupts and invoke handler for each. Also
@@ -798,7 +803,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
int ret;
pp->bridge->ops = &ks_pcie_ops;
- pp->bridge->child_ops = &ks_child_pcie_ops;
+ if (!ks_pcie->is_am6)
+ pp->bridge->child_ops = &ks_child_pcie_ops;
ret = ks_pcie_config_legacy_irq(ks_pcie);
if (ret)
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 39fe2ed5a6a2..39f4664bd84c 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -154,7 +154,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+ ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
pcie->pci = pci;
pcie->ls_epc = ls_epc;
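The one-character fix above replaces a stray comma with a semicolon. Here the comma operator chained two assignments that both still executed, so the change is cosmetic, but the same typo inside a return or a condition does change behavior. A small sketch of the pitfall:

#include <stdio.h>

int main(void)
{
	int x;

	/* The comma operator evaluates left to right and yields the
	 * right operand, so the 1 below is computed and discarded: */
	x = (1, 2);
	printf("x = %d\n", x);		/* prints 2 */

	return 0;
}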
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1c25d8337151..8d028a88b375 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
}
}
+ dw_pcie_iatu_detect(pci);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7e55b2b66182..a608ae1fad57 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -398,9 +398,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_free_msi;
}
+ dw_pcie_iatu_detect(pci);
dw_pcie_setup_rc(pp);
- dw_pcie_msi_init(pp);
if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
ret = pci->ops->start_link(pci);
@@ -551,6 +551,8 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
}
}
+ dw_pcie_msi_init(pp);
+
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 004cb860e266..a945f0c0e73d 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
pci->num_ob_windows = ob;
}
-void dw_pcie_setup(struct dw_pcie *pci)
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- u32 val;
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
if (pci->version >= 0x480A || (!pci->version &&
@@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
pci->num_ob_windows, pci->num_ib_windows);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7247c8b01f04..7d6e9b7576be 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
new file mode 100644
index 000000000000..00cde9a248b5
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FU740 DesignWare PCIe Controller integration
+ * Copyright (C) 2019-2021 SiFive, Inc.
+ * Paul Walmsley
+ * Greentime Hu
+ *
+ * Based in part on the i.MX6 PCIe host controller shim which is:
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_fu740_pcie(x) dev_get_drvdata((x)->dev)
+
+struct fu740_pcie {
+ struct dw_pcie pci;
+ void __iomem *mgmt_base;
+ struct gpio_desc *reset;
+ struct gpio_desc *pwren;
+ struct clk *pcie_aux;
+ struct reset_control *rst;
+};
+
+#define SIFIVE_DEVICESRESETREG 0x28
+
+#define PCIEX8MGMT_PERST_N 0x0
+#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10
+#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18
+#define PCIEX8MGMT_DEVICE_TYPE 0x708
+#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878
+#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890
+#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898
+#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8
+#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0
+#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8
+
+#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0)
+#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5)
+#define PCIEX8MGMT_PHY_TERM_EN BIT(9)
+#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10)
+#define PCIEX8MGMT_PHY_EN BIT(11)
+#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\
+ PCIEX8MGMT_PHY_LOS_THRSHLD|\
+ PCIEX8MGMT_PHY_TERM_EN|\
+ PCIEX8MGMT_PHY_TERM_ACDC|\
+ PCIEX8MGMT_PHY_EN)
+
+#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008
+#define PCIEX8MGMT_PHY_LANE_OFF 0x100
+#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0)
+#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1)
+#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2)
+#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3)
+
+static void fu740_pcie_assert_reset(struct fu740_pcie *afp)
+{
+ /* Assert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 0);
+ /* Assert controller PERST_N */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+}
+
+static void fu740_pcie_deassert_reset(struct fu740_pcie *afp)
+{
+ /* Deassert controller PERST_N */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+ /* Deassert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 1);
+}
+
+static void fu740_pcie_power_on(struct fu740_pcie *afp)
+{
+ gpiod_set_value_cansleep(afp->pwren, 1);
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms.
+ * Section 2.2 of PCI Express Card Electromechanical Specification
+ * Revision 3.0
+ */
+ msleep(100);
+}
+
+static void fu740_pcie_drive_reset(struct fu740_pcie *afp)
+{
+ fu740_pcie_assert_reset(afp);
+ fu740_pcie_power_on(afp);
+ fu740_pcie_deassert_reset(afp);
+}
+
+static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr,
+ const uint16_t wrdata, struct fu740_pcie *afp)
+{
+ struct device *dev = afp->pci.dev;
+ void __iomem *phy_cr_para_addr;
+ void __iomem *phy_cr_para_wr_data;
+ void __iomem *phy_cr_para_wr_en;
+ void __iomem *phy_cr_para_ack;
+ int ret, val;
+
+ /* Setup */
+ if (phy) {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK;
+ } else {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK;
+ }
+
+ writel_relaxed(addr, phy_cr_para_addr);
+ writel_relaxed(wrdata, phy_cr_para_wr_data);
+ writel_relaxed(1, phy_cr_para_wr_en);
+
+ /* Wait for wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for wait_idle state failed!\n");
+
+ /* Clear */
+ writel_relaxed(0, phy_cr_para_wr_en);
+
+ /* Wait for ~wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for !wait_idle state failed!\n");
+}
+
+static void fu740_pcie_init_phy(struct fu740_pcie *afp)
+{
+ /* Enable phy cr_para_sel interfaces */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL);
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL);
+
+ /*
+ * Wait 10 cr_para cycles to guarantee that the registers are ready
+ * to be edited.
+ */
+ ndelay(10);
+
+ /* Set PHY AC termination mode */
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+}
+
+static int fu740_pcie_start_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ struct fu740_pcie *afp = dev_get_drvdata(dev);
+
+ /* Enable LTSSM */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
+ return 0;
+}
+
+static int fu740_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fu740_pcie *afp = to_fu740_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* Power on reset */
+ fu740_pcie_drive_reset(afp);
+
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ return ret;
+ }
+
+ /*
+ * Assert hold_phy_rst (hold the controller LTSSM in reset after
+ * power_up_rst_n for register programming with cr_para)
+ */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+
+ /* Deassert power_up_rst_n */
+ ret = reset_control_deassert(afp->rst);
+ if (ret) {
+ dev_err(dev, "unable to deassert pcie_power_up_rst_n\n");
+ return ret;
+ }
+
+ fu740_pcie_init_phy(afp);
+
+ /* Disable pcieauxclk */
+ clk_disable_unprepare(afp->pcie_aux);
+ /* Clear hold_phy_rst */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+	/* Enable pcieauxclk */
+	ret = clk_prepare_enable(afp->pcie_aux);
+	if (ret) {
+		dev_err(dev, "unable to re-enable pcie_aux clock\n");
+		return ret;
+	}
+ /* Set RC mode */
+ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
+ .host_init = fu740_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = fu740_pcie_start_link,
+};
+
+static int fu740_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct fu740_pcie *afp;
+
+ afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL);
+ if (!afp)
+ return -ENOMEM;
+ pci = &afp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &fu740_pcie_host_ops;
+
+ /* SiFive specific region: mgmt */
+ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+ if (IS_ERR(afp->mgmt_base))
+ return PTR_ERR(afp->mgmt_base);
+
+ /* Fetch GPIOs */
+ afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->reset))
+ return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
+
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->pwren))
+ return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
+
+ /* Fetch clocks */
+ afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(afp->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(afp->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+
+ /* Fetch reset */
+ afp->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(afp->rst))
+ return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to get reset\n");
+
+ platform_set_drvdata(pdev, afp);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static void fu740_pcie_shutdown(struct platform_device *pdev)
+{
+ struct fu740_pcie *afp = platform_get_drvdata(pdev);
+
+	/* Bring down the link so the bootloader gets a clean state on reboot */
+ fu740_pcie_assert_reset(afp);
+}
+
+static const struct of_device_id fu740_pcie_of_match[] = {
+ { .compatible = "sifive,fu740-pcie", },
+ {},
+};
+
+static struct platform_driver fu740_pcie_driver = {
+ .driver = {
+ .name = "fu740-pcie",
+ .of_match_table = fu740_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fu740_pcie_probe,
+ .shutdown = fu740_pcie_shutdown,
+};
+
+builtin_platform_driver(fu740_pcie_driver);
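The fu740_phyregwrite() helper above uses a classic strobe-and-ack handshake. readl_poll_timeout(addr, val, cond, delay_us, timeout_us) from <linux/iopoll.h> re-reads *addr into val until cond holds or the timeout expires. A reduced sketch of just the handshake (demo_ function, same register semantics assumed as in the driver):

static int demo_cr_para_strobe(void __iomem *wr_en, void __iomem *ack)
{
	int ret;
	u32 val;

	writel_relaxed(1, wr_en);			/* raise the strobe */
	ret = readl_poll_timeout(ack, val, val, 10, 5000);
	if (ret)
		return ret;				/* ack never rose */

	writel_relaxed(0, wr_en);			/* drop the strobe */
	return readl_poll_timeout(ack, val, !val, 10, 5000);
}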
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 0cedd1f95f37..f89a7d24ba28 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -81,11 +81,6 @@ static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
writel(val, base + ofs);
}
-static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
-{
- return readl(lpp->app_base + ofs);
-}
-
static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
writel(val, lpp->app_base + ofs);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 6fa216e52d14..bafd2c6ab3c2 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -22,6 +22,8 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -311,6 +313,104 @@ struct tegra_pcie_dw_of_data {
enum dw_pcie_device_mode mode;
};
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_TEGRA194
+
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
return container_of(pci, struct tegra_pcie_dw, pci);
@@ -1019,7 +1119,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
.stop_link = tegra_pcie_dw_stop_link,
};
-static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
.host_init = tegra_pcie_dw_host_init,
};
@@ -1645,7 +1745,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
if (pcie->ep_state == EP_STATE_ENABLED)
return;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
ret);
@@ -1881,7 +1981,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
return &tegra_pcie_epc_features;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
@@ -2311,3 +2411,5 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
+
+#endif /* CONFIG_PCIE_TEGRA194 */
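The pm_runtime_get_sync() to pm_runtime_resume_and_get() change above closes a usage-count leak: get_sync() increments the usage count even when the resume fails, so the caller must drop it on the error path, whereas resume_and_get() drops it internally. A sketch of the manual handling the new helper replaces:

static int demo_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* count was bumped despite the failure; undo it */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... use the device ... */

	pm_runtime_put(dev);
	return 0;
}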
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index a62d247018cf..e4643fb94e78 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -24,8 +24,7 @@ config PCIE_MOBIVEIL_PLAT
config PCIE_LAYERSCAPE_GEN4
bool "Freescale Layerscape PCIe Gen4 controller"
- depends on PCI
- depends on OF && (ARM64 || ARCH_LAYERSCAPE)
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_MOBIVEIL_HOST
help
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 6ab694f8d283..d3924a44db02 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -79,6 +79,7 @@ int pci_host_common_probe(struct platform_device *pdev)
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->msi_domain = true;
return pci_host_probe(bridge);
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 1ff4ce24f4b3..6511648271b2 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -473,7 +473,6 @@ struct hv_pcibus_device {
struct list_head dr_list;
struct msi_domain_info msi_info;
- struct msi_controller msi_chip;
struct irq_domain *irq_domain;
spinlock_t retarget_msi_interrupt_lock;
@@ -1866,9 +1865,6 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
if (!hbus->pci_bus)
return -ENODEV;
- hbus->pci_bus->msi = &hbus->msi_chip;
- hbus->pci_bus->msi->dev = &hbus->hdev->device;
-
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
hv_pci_assign_numa_node(hbus);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 8fcabed7c6a6..8069bd9232d4 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -78,23 +79,8 @@
#define AFI_MSI_FPCI_BAR_ST 0x64
#define AFI_MSI_AXI_BAR_ST 0x68
-#define AFI_MSI_VEC0 0x6c
-#define AFI_MSI_VEC1 0x70
-#define AFI_MSI_VEC2 0x74
-#define AFI_MSI_VEC3 0x78
-#define AFI_MSI_VEC4 0x7c
-#define AFI_MSI_VEC5 0x80
-#define AFI_MSI_VEC6 0x84
-#define AFI_MSI_VEC7 0x88
-
-#define AFI_MSI_EN_VEC0 0x8c
-#define AFI_MSI_EN_VEC1 0x90
-#define AFI_MSI_EN_VEC2 0x94
-#define AFI_MSI_EN_VEC3 0x98
-#define AFI_MSI_EN_VEC4 0x9c
-#define AFI_MSI_EN_VEC5 0xa0
-#define AFI_MSI_EN_VEC6 0xa4
-#define AFI_MSI_EN_VEC7 0xa8
+#define AFI_MSI_VEC(x) (0x6c + ((x) * 4))
+#define AFI_MSI_EN_VEC(x) (0x8c + ((x) * 4))
#define AFI_CONFIGURATION 0xac
#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
@@ -280,10 +266,10 @@
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
struct tegra_msi {
- struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- struct mutex lock;
+ struct mutex map_lock;
+ spinlock_t mask_lock;
void *virt;
dma_addr_t phys;
int irq;
@@ -333,11 +319,6 @@ struct tegra_pcie_soc {
} ectl;
};
-static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
-{
- return container_of(chip, struct tegra_msi, chip);
-}
-
struct tegra_pcie {
struct device *dev;
@@ -372,6 +353,11 @@ struct tegra_pcie {
struct dentry *debugfs;
};
+static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
+{
+ return container_of(msi, struct tegra_pcie, msi);
+}
+
struct tegra_pcie_port {
struct tegra_pcie *pcie;
struct device_node *np;
@@ -1432,7 +1418,6 @@ static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
}
}
-
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -1509,6 +1494,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
phys_put:
if (soc->program_uphy)
tegra_pcie_phys_put(pcie);
+
return err;
}
@@ -1551,161 +1537,227 @@ static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
afi_writel(pcie, val, AFI_PCIE_PME);
}
-static int tegra_msi_alloc(struct tegra_msi *chip)
-{
- int msi;
-
- mutex_lock(&chip->lock);
-
- msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
- if (msi < INT_PCI_MSI_NR)
- set_bit(msi, chip->used);
- else
- msi = -ENOSPC;
-
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
+static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
- struct device *dev = chip->chip.dev;
-
- mutex_lock(&chip->lock);
-
- if (!test_bit(irq, chip->used))
- dev_err(dev, "trying to free unused MSI#%lu\n", irq);
- else
- clear_bit(irq, chip->used);
-
- mutex_unlock(&chip->lock);
-}
-
-static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
-{
- struct tegra_pcie *pcie = data;
- struct device *dev = pcie->dev;
+ struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct tegra_msi *msi = &pcie->msi;
- unsigned int i, processed = 0;
+ struct device *dev = pcie->dev;
+ unsigned int i;
+
+ chained_irq_enter(chip, desc);
for (i = 0; i < 8; i++) {
- unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+ unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));
while (reg) {
unsigned int offset = find_first_bit(&reg, 32);
unsigned int index = i * 32 + offset;
unsigned int irq;
- /* clear the interrupt */
- afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
-
- irq = irq_find_mapping(msi->domain, index);
+ irq = irq_find_mapping(msi->domain->parent, index);
if (irq) {
- if (test_bit(index, msi->used))
- generic_handle_irq(irq);
- else
- dev_info(dev, "unhandled MSI\n");
+ generic_handle_irq(irq);
} else {
/*
				 * That's weird: who triggered this?
				 * Just clear it.
*/
dev_info(dev, "unexpected MSI\n");
+ afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
}
/* see if there's any more pending in this vector */
- reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
-
- processed++;
+ reg = afi_readl(pcie, AFI_MSI_VEC(i));
}
}
- return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
+ chained_irq_exit(chip, desc);
}
-static int tegra_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev, struct msi_desc *desc)
+static void tegra_msi_top_irq_ack(struct irq_data *d)
{
- struct tegra_msi *msi = to_tegra_msi(chip);
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
+ irq_chip_ack_parent(d);
+}
- hwirq = tegra_msi_alloc(msi);
- if (hwirq < 0)
- return hwirq;
+static void tegra_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
- irq = irq_create_mapping(msi->domain, hwirq);
- if (!irq) {
- tegra_msi_free(msi, hwirq);
- return -EINVAL;
- }
+static void tegra_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip tegra_msi_top_chip = {
+ .name = "Tegra PCIe MSI",
+ .irq_ack = tegra_msi_top_irq_ack,
+ .irq_mask = tegra_msi_top_irq_mask,
+ .irq_unmask = tegra_msi_top_irq_unmask,
+};
- irq_set_msi_desc(irq, desc);
+static void tegra_msi_irq_ack(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
- msg.address_lo = lower_32_bits(msi->phys);
- msg.address_hi = upper_32_bits(msi->phys);
- msg.data = hwirq;
+ /* clear the interrupt */
+ afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
+}
- pci_write_msi_msg(irq, &msg);
+static void tegra_msi_irq_mask(struct irq_data *d)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+ unsigned long flags;
+ u32 value;
- return 0;
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static void tegra_msi_teardown_irq(struct msi_controller *chip,
- unsigned int irq)
+static void tegra_msi_irq_unmask(struct irq_data *d)
{
- struct tegra_msi *msi = to_tegra_msi(chip);
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ unsigned int index = d->hwirq / 32;
+ unsigned long flags;
+ u32 value;
- irq_dispose_mapping(irq);
- tegra_msi_free(msi, hwirq);
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static struct irq_chip tegra_msi_irq_chip = {
- .name = "Tegra PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
+
+ msg->address_lo = lower_32_bits(msi->phys);
+ msg->address_hi = upper_32_bits(msi->phys);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip tegra_msi_bottom_chip = {
+ .name = "Tegra MSI",
+ .irq_ack = tegra_msi_irq_ack,
+ .irq_mask = tegra_msi_irq_mask,
+ .irq_unmask = tegra_msi_irq_unmask,
+ .irq_set_affinity = tegra_msi_set_affinity,
+ .irq_compose_msi_msg = tegra_compose_msi_msg,
};
-static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
- irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct tegra_msi *msi = domain->host_data;
+ unsigned int i;
+ int hwirq;
+
+ mutex_lock(&msi->map_lock);
+
+ hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &tegra_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
tegra_cpuidle_pcie_irqs_in_use();
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = tegra_msi_map,
+static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct tegra_msi *msi = domain->host_data;
+
+ mutex_lock(&msi->map_lock);
+
+ bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&msi->map_lock);
+}
+
+static const struct irq_domain_ops tegra_msi_domain_ops = {
+ .alloc = tegra_msi_domain_alloc,
+ .free = tegra_msi_domain_free,
+};
+
+static struct msi_domain_info tegra_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &tegra_msi_top_chip,
};
+static int tegra_allocate_domains(struct tegra_msi *msi)
+{
+ struct tegra_pcie *pcie = msi_to_pcie(msi);
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
+ &tegra_msi_domain_ops, msi);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ if (!msi->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void tegra_free_domains(struct tegra_msi *msi)
+{
+ struct irq_domain *parent = msi->domain->parent;
+
+ irq_domain_remove(msi->domain);
+ irq_domain_remove(parent);
+}
+
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct platform_device *pdev = to_platform_device(pcie->dev);
struct tegra_msi *msi = &pcie->msi;
struct device *dev = pcie->dev;
int err;
- mutex_init(&msi->lock);
-
- msi->chip.dev = dev;
- msi->chip.setup_irq = tegra_msi_setup_irq;
- msi->chip.teardown_irq = tegra_msi_teardown_irq;
+ mutex_init(&msi->map_lock);
+ spin_lock_init(&msi->mask_lock);
- msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
- &msi_domain_ops, &msi->chip);
- if (!msi->domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = tegra_allocate_domains(msi);
+ if (err)
+ return err;
}
err = platform_get_irq_byname(pdev, "msi");
@@ -1714,12 +1766,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
msi->irq = err;
- err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
- tegra_msi_irq_chip.name, pcie);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ: %d\n", err);
- goto free_irq_domain;
- }
+ irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
/* Though the PCIe controller can address >32-bit address space, to
* facilitate endpoints that support only 32-bit MSI target address,
@@ -1740,14 +1787,14 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
goto free_irq;
}
- host->msi = &msi->chip;
-
return 0;
free_irq:
- free_irq(msi->irq, pcie);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
free_irq_domain:
- irq_domain_remove(msi->domain);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_free_domains(msi);
+
return err;
}
@@ -1755,22 +1802,18 @@ static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
- u32 reg;
+ u32 reg, msi_state[INT_PCI_MSI_NR / 32];
+ int i;
afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
/* this register is in 4K increments */
afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
- /* enable all MSI vectors */
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
- afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
+ /* Restore the MSI allocation state */
+ bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
+ for (i = 0; i < ARRAY_SIZE(msi_state); i++)
+ afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
/* and unmask the MSI interrupt */
reg = afi_readl(pcie, AFI_INTR_MASK);
@@ -1786,16 +1829,16 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
DMA_ATTR_NO_KERNEL_MAPPING);
- if (msi->irq > 0)
- free_irq(msi->irq, pcie);
-
for (i = 0; i < INT_PCI_MSI_NR; i++) {
irq = irq_find_mapping(msi->domain, i);
if (irq > 0)
- irq_dispose_mapping(irq);
+ irq_domain_free_irqs(irq, 1);
}
- irq_domain_remove(msi->domain);
+ irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_free_domains(msi);
}
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
@@ -1807,16 +1850,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
value &= ~AFI_INTR_MASK_MSI_MASK;
afi_writel(pcie, value, AFI_INTR_MASK);
- /* disable all MSI vectors */
- afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
- afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
-
return 0;
}
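
The Tegra hunks above retire the old msi_controller callbacks in favor of the hierarchical MSI model: a linear "bottom" irq_domain owns the hardware vector bitmap, and the generic PCI-MSI layer stacks a "top" domain on it. A minimal sketch of the pattern follows; the function and parameter names are illustrative, only the kernel APIs are real:

	static int example_create_msi_domains(struct fwnode_handle *fwnode,
					      const struct irq_domain_ops *bottom_ops,
					      struct msi_domain_info *top_info,
					      void *host_data, unsigned int nvec,
					      struct irq_domain **msi_domain)
	{
		struct irq_domain *parent;

		/* bottom domain: allocates hwirqs and programs the controller */
		parent = irq_domain_create_linear(fwnode, nvec, bottom_ops, host_data);
		if (!parent)
			return -ENOMEM;
		irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

		/* top domain: implements PCI MSI/MSI-X semantics on top of it */
		*msi_domain = pci_msi_create_irq_domain(fwnode, top_info, parent);
		if (!*msi_domain) {
			irq_domain_remove(parent);
			return -ENOMEM;
		}

		return 0;
	}

With this split, mask/unmask on the top chip pairs pci_msi_mask_irq()/pci_msi_unmask_irq() with irq_chip_mask_parent()/irq_chip_unmask_parent(), exactly as tegra_msi_top_irq_mask()/unmask() do above.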
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index f964fd26f7e0..ffd84656544f 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
- node_bits = (cfg->res.start >> 32) & (1 << 12);
+ node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);
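
upper_32_bits() is used here because an open-coded `>> 32` on a resource_size_t is undefined (and a compiler warning) when that type is only 32 bits wide; the helper compiles cleanly in both configurations. Its semantics, sketched on a hypothetical address:

	u64 addr = 0x0000123400000f00ULL;	/* hypothetical 64-bit resource start */
	u32 hi = upper_32_bits(addr);		/* 0x00001234 */
	u32 lo = lower_32_bits(addr);		/* 0x00000f00 */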
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 1a3f70ac61fc..0660b9da204f 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -12,6 +12,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
* structure here for the BAR.
*/
bar4_start = res_pem->start + 0xf00000;
- pem_pci->ea_entry[0] = (u32)bar4_start | 2;
- pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
- pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+ pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+ pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
cfg->priv = pem_pci;
return 0;
@@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-#define PEM_RES_BASE 0x87e0c0000000UL
-#define PEM_NODE_MASK GENMASK(45, 44)
-#define PEM_INDX_MASK GENMASK(26, 24)
+#define PEM_RES_BASE 0x87e0c0000000ULL
+#define PEM_NODE_MASK GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK GENMASK_ULL(26, 24)
#define PEM_MIN_DOM_IN_NODE 4
#define PEM_MAX_DOM_IN_NODE 10
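
GENMASK() expands to an unsigned long, which is 32 bits on 32-bit builds, so a mask covering bits 45:44 would silently truncate there; GENMASK_ULL() always computes in 64-bit arithmetic. A hedged illustration (FIELD_GET() is shown only as an example consumer, it is not used by this driver):

	#include <linux/bitfield.h>

	u64 base = PEM_RES_BASE | (2ULL << 44);			/* hypothetical node 2 */
	u64 node = FIELD_GET(GENMASK_ULL(45, 44), base);	/* == 2 */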
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 2afdc865253e..7f503dd4ff81 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
- port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
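
The one-call devm_platform_ioremap_resource_byname() helper never hands back the struct resource, yet this driver still needs the physical address for port->cfg_addr, so the two-step form is restored; the earlier conversion had left `res` unset at the `res->start` dereference. The general pattern, sketched:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	base = devm_ioremap_resource(dev, res);	/* tolerates res == NULL, returns ERR_PTR */
	if (IS_ERR(base))
		return PTR_ERR(base);
	/* res->start stays available, e.g. for programming address windows */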
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 42691dd8ebef..98aa1dccc6e6 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -236,10 +236,8 @@ static int altera_msi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"vector_slave");
msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(msi->vector_base)) {
- dev_err(&pdev->dev, "failed to map vector_slave memory\n");
+ if (IS_ERR(msi->vector_base))
return PTR_ERR(msi->vector_base);
- }
msi->vector_phy = res->start;
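
The deleted message was redundant: devm_ioremap_resource() already prints a device-prefixed error on every failure path, so callers only need to propagate the ERR_PTR. The resulting idiom:

	base = devm_ioremap_resource(&pdev->dev, res);	/* logs on failure by itself */
	if (IS_ERR(base))
		return PTR_ERR(base);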
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e330e6811f0b..08bc788d9422 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
brcm_pcie_turn_off(pcie);
ret = brcm_phy_stop(pcie);
+ reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
base = pcie->base;
clk_prepare_enable(pcie->clk);
+ ret = reset_control_reset(pcie->rescal);
+ if (ret)
+ goto err_disable_clk;
+
ret = brcm_phy_start(pcie);
if (ret)
- goto err;
+ goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
pcie->bridge_sw_init_set(pcie, 0);
@@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
ret = brcm_pcie_setup(pcie);
if (ret)
- goto err;
+ goto err_reset;
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
-err:
+err_reset:
+ reset_control_rearm(pcie->rescal);
+err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
}
@@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
brcm_phy_stop(pcie);
- reset_control_assert(pcie->rescal);
+ reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk);
}
@@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->perst_reset);
}
- ret = reset_control_deassert(pcie->rescal);
+ ret = reset_control_reset(pcie->rescal);
if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ dev_err(&pdev->dev, "failed to reset 'rescal'\n");
ret = brcm_phy_start(pcie);
if (ret) {
- reset_control_assert(pcie->rescal);
+ reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk);
return ret;
}
@@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
+ ret = -ENODEV;
goto fail;
}
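
reset_control_reset() and reset_control_rearm() form a balanced pair for shared, self-deasserting reset lines: the reset pulse takes a "triggered" reference, and rearm drops it so a later reset_control_reset() on the shared line actually fires again. Mixing in reset_control_assert()/deassert(), as the old code did, unbalances that count. A sketch of the lifecycle this patch adopts for the "rescal" line:

	/* probe/resume: pulse the shared reset (counted) */
	ret = reset_control_reset(pcie->rescal);
	if (ret)
		return ret;

	/* ... hardware in use ... */

	/* suspend/remove/error: drop the trigger reference */
	reset_control_rearm(pcie->rescal);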
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 908475d27e0e..eede4e8f3f75 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
NULL, NULL);
}
- return hwirq;
+ return 0;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
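
The irq_domain .alloc contract is to return 0 on success; a nonzero value indicates failure, and the chosen hwirq travels through irq_domain_set_info() rather than the return value. Expected shape, sketched (the reserve helper is hypothetical):

	static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
					unsigned int nr_irqs, void *arg)
	{
		int hwirq = example_reserve_hwirqs(domain->host_data, nr_irqs); /* hypothetical */

		if (hwirq < 0)
			return hwirq;

		/* ... irq_domain_set_info(domain, virq + i, hwirq + i, ...) per vector ... */

		return 0;	/* success is 0, never the (positive) hwirq */
	}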
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
new file mode 100644
index 000000000000..3c5b97716d40
--- /dev/null
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Jianjun Wang <jianjun.wang@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+
+#define PCIE_SETTING_REG 0x80
+#define PCIE_PCI_IDS_1 0x9c
+#define PCI_CLASS(class) (class << 8)
+#define PCIE_RC_MODE BIT(0)
+
+#define PCIE_CFGNUM_REG 0x140
+#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
+#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
+#define PCIE_CFG_BYTE_EN(bytes) (((bytes) << 16) & GENMASK(19, 16))
+#define PCIE_CFG_FORCE_BYTE_EN BIT(20)
+#define PCIE_CFG_OFFSET_ADDR 0x1000
+#define PCIE_CFG_HEADER(bus, devfn) \
+ (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
+
+#define PCIE_RST_CTRL_REG 0x148
+#define PCIE_MAC_RSTB BIT(0)
+#define PCIE_PHY_RSTB BIT(1)
+#define PCIE_BRG_RSTB BIT(2)
+#define PCIE_PE_RSTB BIT(3)
+
+#define PCIE_LTSSM_STATUS_REG 0x150
+#define PCIE_LTSSM_STATE_MASK GENMASK(28, 24)
+#define PCIE_LTSSM_STATE(val) ((val & PCIE_LTSSM_STATE_MASK) >> 24)
+#define PCIE_LTSSM_STATE_L2_IDLE 0x14
+
+#define PCIE_LINK_STATUS_REG 0x154
+#define PCIE_PORT_LINKUP BIT(8)
+
+#define PCIE_MSI_SET_NUM 8
+#define PCIE_MSI_IRQS_PER_SET 32
+#define PCIE_MSI_IRQS_NUM \
+ (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
+
+#define PCIE_INT_ENABLE_REG 0x180
+#define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
+#define PCIE_MSI_SHIFT 8
+#define PCIE_INTX_SHIFT 24
+#define PCIE_INTX_ENABLE \
+ GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
+
+#define PCIE_INT_STATUS_REG 0x184
+#define PCIE_MSI_SET_ENABLE_REG 0x190
+#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+
+#define PCIE_MSI_SET_BASE_REG 0xc00
+#define PCIE_MSI_SET_OFFSET 0x10
+#define PCIE_MSI_SET_STATUS_OFFSET 0x04
+#define PCIE_MSI_SET_ENABLE_OFFSET 0x08
+
+#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
+#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+
+#define PCIE_ICMD_PM_REG 0x198
+#define PCIE_TURN_OFF_LINK BIT(4)
+
+#define PCIE_TRANS_TABLE_BASE_REG 0x800
+#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
+#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
+#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc
+#define PCIE_ATR_TRSL_PARAM_OFFSET 0x10
+#define PCIE_ATR_TLB_SET_OFFSET 0x20
+
+#define PCIE_MAX_TRANS_TABLES 8
+#define PCIE_ATR_EN BIT(0)
+#define PCIE_ATR_SIZE(size) \
+ (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
+#define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0))
+#define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0)
+#define PCIE_ATR_TYPE_IO PCIE_ATR_ID(1)
+#define PCIE_ATR_TLP_TYPE(type) (((type) << 16) & GENMASK(18, 16))
+#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
+#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+
+/**
+ * struct mtk_msi_set - MSI information for each set
+ * @base: IO mapped register base
+ * @msg_addr: MSI message address
+ * @saved_irq_state: IRQ enable state saved at suspend time
+ */
+struct mtk_msi_set {
+ void __iomem *base;
+ phys_addr_t msg_addr;
+ u32 saved_irq_state;
+};
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @reg_base: physical register base
+ * @mac_reset: MAC reset control
+ * @phy_reset: PHY reset control
+ * @phy: PHY controller block
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count for this port
+ * @irq: PCIe controller interrupt number
+ * @saved_irq_state: IRQ enable state saved at suspend time
+ * @irq_lock: lock protecting IRQ register access
+ * @intx_domain: legacy INTx IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @msi_bottom_domain: MSI IRQ bottom domain
+ * @msi_sets: MSI sets information
+ * @lock: lock protecting IRQ bit map
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
+ */
+struct mtk_pcie_port {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t reg_base;
+ struct reset_control *mac_reset;
+ struct reset_control *phy_reset;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ int irq;
+ u32 saved_irq_state;
+ raw_spinlock_t irq_lock;
+ struct irq_domain *intx_domain;
+ struct irq_domain *msi_domain;
+ struct irq_domain *msi_bottom_domain;
+ struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
+ struct mutex lock;
+ DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+};
+
+/**
+ * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
+ * @bus: PCI bus to query
+ * @devfn: device/function number
+ * @where: offset in config space
+ * @size: data size in TLP header
+ *
+ * Set byte enable field and device information in configuration TLP header.
+ */
+static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
+ int where, int size)
+{
+ struct mtk_pcie_port *port = bus->sysdata;
+ int bytes;
+ u32 val;
+
+ bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
+
+ val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
+ PCIE_CFG_HEADER(bus->number, devfn);
+
+ writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct mtk_pcie_port *port = bus->sysdata;
+
+ return port->base + PCIE_CFG_OFFSET_ADDR + where;
+}
+
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+ return pci_generic_config_read32(bus, devfn, where, size, val);
+}
+
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ mtk_pcie_config_tlp_header(bus, devfn, where, size);
+
+ if (size <= 2)
+ val <<= (where & 0x3) * 8;
+
+ return pci_generic_config_write32(bus, devfn, where, 4, val);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+ .map_bus = mtk_pcie_map_bus,
+ .read = mtk_pcie_config_read,
+ .write = mtk_pcie_config_write,
+};
+
+static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
+ resource_size_t cpu_addr,
+ resource_size_t pci_addr,
+ resource_size_t size,
+ unsigned long type, int num)
+{
+ void __iomem *table;
+ u32 val;
+
+ if (num >= PCIE_MAX_TRANS_TABLES) {
+ dev_err(port->dev, "not enough translation tables for addr: %#llx, limited to [%d]\n",
+ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+ return -ENODEV;
+ }
+
+ table = port->base + PCIE_TRANS_TABLE_BASE_REG +
+ num * PCIE_ATR_TLB_SET_OFFSET;
+
+ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
+ table);
+ writel_relaxed(upper_32_bits(cpu_addr),
+ table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+ writel_relaxed(lower_32_bits(pci_addr),
+ table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+ writel_relaxed(upper_32_bits(pci_addr),
+ table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+
+ if (type == IORESOURCE_IO)
+ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+ else
+ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+
+ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+
+ return 0;
+}
+
+static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &port->msi_sets[i];
+
+ msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
+ i * PCIE_MSI_SET_OFFSET;
+ msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
+ i * PCIE_MSI_SET_OFFSET;
+
+ /* Configure the MSI capture address */
+ writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
+ writel_relaxed(upper_32_bits(msi_set->msg_addr),
+ port->base + PCIE_MSI_SET_ADDR_HI_BASE +
+ i * PCIE_MSI_SET_ADDR_HI_OFFSET);
+ }
+
+ val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
+ val |= PCIE_MSI_SET_ENABLE;
+ writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
+
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val |= PCIE_MSI_ENABLE;
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+}
+
+static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+{
+ struct resource_entry *entry;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+ unsigned int table_index = 0;
+ int err;
+ u32 val;
+
+ /* Set as RC mode */
+ val = readl_relaxed(port->base + PCIE_SETTING_REG);
+ val |= PCIE_RC_MODE;
+ writel_relaxed(val, port->base + PCIE_SETTING_REG);
+
+ /* Set class code */
+ val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
+ val &= ~GENMASK(31, 8);
+ val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
+ writel_relaxed(val, port->base + PCIE_PCI_IDS_1);
+
+ /* Mask all INTx interrupts */
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val &= ~PCIE_INTX_ENABLE;
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+
+ /* Assert all reset signals */
+ val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
+ writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
+ * and 2.2.1 (Initial Power-Up (G3 to S0)).
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(100);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
+ writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+
+ /* Check if the link is up or not */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
+ !!(val & PCIE_PORT_LINKUP), 20,
+ PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
+ if (err) {
+ val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
+ dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ return err;
+ }
+
+ mtk_pcie_enable_msi(port);
+
+ /* Set PCIe translation windows */
+ resource_list_for_each_entry(entry, &host->windows) {
+ struct resource *res = entry->res;
+ unsigned long type = resource_type(res);
+ resource_size_t cpu_addr;
+ resource_size_t pci_addr;
+ resource_size_t size;
+ const char *range_type;
+
+ if (type == IORESOURCE_IO) {
+ cpu_addr = pci_pio_to_address(res->start);
+ range_type = "IO";
+ } else if (type == IORESOURCE_MEM) {
+ cpu_addr = res->start;
+ range_type = "MEM";
+ } else {
+ continue;
+ }
+
+ pci_addr = res->start - entry->offset;
+ size = resource_size(res);
+ err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
+ type, table_index);
+ if (err)
+ return err;
+
+ dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ range_type, table_index, (unsigned long long)cpu_addr,
+ (unsigned long long)pci_addr, (unsigned long long)size);
+
+ table_index++;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void mtk_pcie_msi_irq_mask(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+ irq_chip_mask_parent(data);
+}
+
+static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
+{
+ pci_msi_unmask_irq(data);
+ irq_chip_unmask_parent(data);
+}
+
+static struct irq_chip mtk_msi_irq_chip = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = mtk_pcie_msi_irq_mask,
+ .irq_unmask = mtk_pcie_msi_irq_unmask,
+ .name = "MSI",
+};
+
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_pcie_port *port = data->domain->host_data;
+ unsigned long hwirq;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ msg->address_hi = upper_32_bits(msi_set->msg_addr);
+ msg->address_lo = lower_32_bits(msi_set->msg_addr);
+ msg->data = hwirq;
+ dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
+ hwirq, msg->address_hi, msg->address_lo, msg->data);
+}
+
+static void mtk_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ unsigned long hwirq;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
+}
+
+static void mtk_msi_bottom_irq_mask(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_pcie_port *port = data->domain->host_data;
+ unsigned long hwirq, flags;
+ u32 val;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ val &= ~BIT(hwirq);
+ writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
+{
+ struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
+ struct mtk_pcie_port *port = data->domain->host_data;
+ unsigned long hwirq, flags;
+ u32 val;
+
+ hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ val |= BIT(hwirq);
+ writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .irq_ack = mtk_msi_bottom_irq_ack,
+ .irq_mask = mtk_msi_bottom_irq_mask,
+ .irq_unmask = mtk_msi_bottom_irq_unmask,
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_pcie_set_affinity,
+ .name = "MSI",
+};
+
+static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ struct mtk_msi_set *msi_set;
+ int i, hwirq, set_idx;
+
+ mutex_lock(&port->lock);
+
+ hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&port->lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
+ msi_set = &port->msi_sets[set_idx];
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &mtk_msi_bottom_irq_chip, msi_set,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&port->lock);
+
+ bitmap_release_region(port->msi_irq_in_use, data->hwirq,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
+ .alloc = mtk_msi_bottom_domain_alloc,
+ .free = mtk_msi_bottom_domain_free,
+};
+
+static void mtk_intx_mask(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static void mtk_intx_unmask(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
+ writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+/**
+ * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
+ * @data: pointer to chip specific data
+ *
+ * As an emulated level IRQ, its interrupt status remains asserted
+ * until the corresponding de-assert message is received; hence the
+ * status can only be cleared once the interrupt has been serviced.
+ */
+static void mtk_intx_eoi(struct irq_data *data)
+{
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long hwirq;
+
+ hwirq = data->hwirq + PCIE_INTX_SHIFT;
+ writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
+}
+
+static struct irq_chip mtk_intx_irq_chip = {
+ .irq_mask = mtk_intx_mask,
+ .irq_unmask = mtk_intx_unmask,
+ .irq_eoi = mtk_intx_eoi,
+ .irq_set_affinity = mtk_pcie_set_affinity,
+ .name = "INTx",
+};
+
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
+ handle_fasteoi_irq, "INTx");
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mtk_pcie_intx_map,
+};
+
+static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *intc_node, *node = dev->of_node;
+ int ret;
+
+ raw_spin_lock_init(&port->irq_lock);
+
+ /* Setup INTx */
+ intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!intc_node) {
+ dev_err(dev, "missing interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to create INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ /* Setup MSI */
+ mutex_init(&port->lock);
+
+ port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, port);
+ if (!port->msi_bottom_domain) {
+ dev_err(dev, "failed to create MSI bottom domain\n");
+ ret = -ENODEV;
+ goto err_msi_bottom_domain;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
+ &mtk_msi_domain_info,
+ port->msi_bottom_domain);
+ if (!port->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ ret = -ENODEV;
+ goto err_msi_domain;
+ }
+
+ return 0;
+
+err_msi_domain:
+ irq_domain_remove(port->msi_bottom_domain);
+err_msi_bottom_domain:
+ irq_domain_remove(port->intx_domain);
+
+ return ret;
+}
+
+static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
+{
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ if (port->intx_domain)
+ irq_domain_remove(port->intx_domain);
+
+ if (port->msi_domain)
+ irq_domain_remove(port->msi_domain);
+
+ if (port->msi_bottom_domain)
+ irq_domain_remove(port->msi_bottom_domain);
+
+ irq_dispose_mapping(port->irq);
+}
+
+static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
+{
+ struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
+ unsigned long msi_enable, msi_status;
+ unsigned int virq;
+ irq_hw_number_t bit, hwirq;
+
+ msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+
+ do {
+ msi_status = readl_relaxed(msi_set->base +
+ PCIE_MSI_SET_STATUS_OFFSET);
+ msi_status &= msi_enable;
+ if (!msi_status)
+ break;
+
+ for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
+ hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
+ virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
+ generic_handle_irq(virq);
+ }
+ } while (true);
+}
+
+static void mtk_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long status;
+ unsigned int virq;
+ irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
+ for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
+ PCIE_INTX_SHIFT) {
+ virq = irq_find_mapping(port->intx_domain,
+ irq_bit - PCIE_INTX_SHIFT);
+ generic_handle_irq(virq);
+ }
+
+ irq_bit = PCIE_MSI_SHIFT;
+ for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
+ PCIE_MSI_SHIFT) {
+ mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);
+
+ writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ err = mtk_pcie_init_irq_domains(port);
+ if (err)
+ return err;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
+
+ return 0;
+}
+
+static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
+ int ret;
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
+ if (!regs)
+ return -EINVAL;
+ port->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(port->base)) {
+ dev_err(dev, "failed to map register base\n");
+ return PTR_ERR(port->base);
+ }
+
+ port->reg_base = regs->start;
+
+ port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(port->phy_reset)) {
+ ret = PTR_ERR(port->phy_reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get PHY reset\n");
+
+ return ret;
+ }
+
+ port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
+ if (IS_ERR(port->mac_reset)) {
+ ret = PTR_ERR(port->mac_reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get MAC reset\n");
+
+ return ret;
+ }
+
+ port->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(port->phy)) {
+ ret = PTR_ERR(port->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get PHY\n");
+
+ return ret;
+ }
+
+ port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
+ if (port->num_clks < 0) {
+ dev_err(dev, "failed to get clocks\n");
+ return port->num_clks;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_power_up(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ int err;
+
+ /* PHY power on and enable pipe clock */
+ reset_control_deassert(port->phy_reset);
+
+ err = phy_init(port->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ goto err_phy_init;
+ }
+
+ err = phy_power_on(port->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ /* MAC power on and enable transaction layer clocks */
+ reset_control_deassert(port->mac_reset);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = clk_bulk_prepare_enable(port->num_clks, port->clks);
+ if (err) {
+ dev_err(dev, "failed to enable clocks\n");
+ goto err_clk_init;
+ }
+
+ return 0;
+
+err_clk_init:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_assert(port->mac_reset);
+ phy_power_off(port->phy);
+err_phy_on:
+ phy_exit(port->phy);
+err_phy_init:
+ reset_control_assert(port->phy_reset);
+
+ return err;
+}
+
+static void mtk_pcie_power_down(struct mtk_pcie_port *port)
+{
+ clk_bulk_disable_unprepare(port->num_clks, port->clks);
+
+ pm_runtime_put_sync(port->dev);
+ pm_runtime_disable(port->dev);
+ reset_control_assert(port->mac_reset);
+
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ reset_control_assert(port->phy_reset);
+}
+
+static int mtk_pcie_setup(struct mtk_pcie_port *port)
+{
+ int err;
+
+ err = mtk_pcie_parse_port(port);
+ if (err)
+ return err;
+
+ /* Don't touch the hardware registers before power up */
+ err = mtk_pcie_power_up(port);
+ if (err)
+ return err;
+
+ /* Try link up */
+ err = mtk_pcie_startup_port(port);
+ if (err)
+ goto err_setup;
+
+ err = mtk_pcie_setup_irq(port);
+ if (err)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ mtk_pcie_power_down(port);
+
+ return err;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_pcie_port *port;
+ struct pci_host_bridge *host;
+ int err;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!host)
+ return -ENOMEM;
+
+ port = pci_host_bridge_priv(host);
+
+ port->dev = dev;
+ platform_set_drvdata(pdev, port);
+
+ err = mtk_pcie_setup(port);
+ if (err)
+ return err;
+
+ host->ops = &mtk_pcie_ops;
+ host->sysdata = port;
+
+ err = pci_host_probe(host);
+ if (err) {
+ mtk_pcie_irq_teardown(port);
+ mtk_pcie_power_down(port);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_pcie_port *port = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ pci_unlock_rescan_remove();
+
+ mtk_pcie_irq_teardown(port);
+ mtk_pcie_power_down(port);
+
+ return 0;
+}
+
+static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
+{
+ int i;
+
+ raw_spin_lock(&port->irq_lock);
+
+ port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &port->msi_sets[i];
+
+ msi_set->saved_irq_state = readl_relaxed(msi_set->base +
+ PCIE_MSI_SET_ENABLE_OFFSET);
+ }
+
+ raw_spin_unlock(&port->irq_lock);
+}
+
+static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
+{
+ int i;
+
+ raw_spin_lock(&port->irq_lock);
+
+ writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
+
+ for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
+ struct mtk_msi_set *msi_set = &port->msi_sets[i];
+
+ writel_relaxed(msi_set->saved_irq_state,
+ msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
+ }
+
+ raw_spin_unlock(&port->irq_lock);
+}
+
+static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
+{
+ u32 val;
+
+ val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
+ val |= PCIE_TURN_OFF_LINK;
+ writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
+
+ /* Check that the link has entered L2 */
+ return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
+ (PCIE_LTSSM_STATE(val) ==
+ PCIE_LTSSM_STATE_L2_IDLE), 20,
+ 50 * USEC_PER_MSEC);
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ /* Trigger link to L2 state */
+ err = mtk_pcie_turn_off_link(port);
+ if (err) {
+ dev_err(port->dev, "cannot enter L2 state\n");
+ return err;
+ }
+
+ /* Pull down the PERST# pin */
+ val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+
+ dev_dbg(port->dev, "entered L2 state successfully\n");
+
+ mtk_pcie_irq_save(port);
+ mtk_pcie_power_down(port);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ int err;
+
+ err = mtk_pcie_power_up(port);
+ if (err)
+ return err;
+
+ err = mtk_pcie_startup_port(port);
+ if (err) {
+ mtk_pcie_power_down(port);
+ return err;
+ }
+
+ mtk_pcie_irq_restore(port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
+static const struct of_device_id mtk_pcie_of_match[] = {
+ { .compatible = "mediatek,mt8192-pcie" },
+ {},
+};
+
+static struct platform_driver mtk_pcie_driver = {
+ .probe = mtk_pcie_probe,
+ .remove = mtk_pcie_remove,
+ .driver = {
+ .name = "mtk-pcie",
+ .of_match_table = mtk_pcie_of_match,
+ .pm = &mtk_pcie_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
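
For orientation: the new driver exposes 8 x 32 = 256 MSI vectors, grouped into eight register "sets". Every hwirq in the bottom domain is decomposed the same way throughout the file above; a sketch of the arithmetic:

	unsigned int set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;	/* 0..7, selects msi_sets[] */
	unsigned int bit     = hwirq % PCIE_MSI_IRQS_PER_SET;	/* bit in that set's 32-bit regs */

mtk_msi_bottom_domain_alloc() reserves hwirq ranges from the shared bitmap, and the ack/mask/unmask callbacks then touch only BIT(bit) in the selected set's status and enable registers.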
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 23548b517e4b..62a042e75d9a 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -143,6 +143,7 @@ struct mtk_pcie_port;
* struct mtk_pcie_soc - differentiate between host generations
* @need_fix_class_id: whether this host's class ID needed to be fixed or not
* @need_fix_device_id: whether this host's device ID needed to be fixed or not
+ * @no_msi: Bridge has no MSI support, and relies on an external block
* @device_id: device ID which this host need to be fixed
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
@@ -151,6 +152,7 @@ struct mtk_pcie_port;
struct mtk_pcie_soc {
bool need_fix_class_id;
bool need_fix_device_id;
+ bool no_msi;
unsigned int device_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
@@ -760,7 +762,7 @@ static struct pci_ops mtk_pcie_ops = {
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- u32 func = PCI_FUNC(port->slot << 3);
+ u32 func = PCI_FUNC(port->slot);
u32 slot = PCI_SLOT(port->slot << 3);
u32 val;
int err;
@@ -1087,6 +1089,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
host->ops = pcie->soc->ops;
host->sysdata = pcie;
+ host->msi_domain = pcie->soc->no_msi;
err = pci_host_probe(host);
if (err)
@@ -1176,6 +1179,7 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
+ .no_msi = true,
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
};
@@ -1210,6 +1214,7 @@ static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
{},
};
+MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
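
The one-character fix above follows from the devfn encoding: PCI_FUNC() masks the low three bits, so PCI_FUNC(x << 3) is always 0 and the old expression could never yield a non-zero function number. For reference, the <linux/pci.h> helpers:

	devfn = PCI_DEVFN(slot, func);	/* ((slot & 0x1f) << 3) | (func & 0x07) */
	slot  = PCI_SLOT(devfn);	/* (devfn >> 3) & 0x1f */
	func  = PCI_FUNC(devfn);	/* devfn & 0x07 */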
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 04c19ff81aff..89c68c56d93b 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -301,27 +301,27 @@ static const struct cause event_cause[NUM_EVENTS] = {
LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
};
-struct event_map pcie_event_to_event[] = {
+static struct event_map pcie_event_to_event[] = {
PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
};
-struct event_map sec_error_to_event[] = {
+static struct event_map sec_error_to_event[] = {
SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
};
-struct event_map ded_error_to_event[] = {
+static struct event_map ded_error_to_event[] = {
DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
};
-struct event_map local_status_to_event[] = {
+static struct event_map local_status_to_event[] = {
LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
@@ -1023,10 +1023,8 @@ static int mc_platform_init(struct pci_config_window *cfg)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "unable to request IRQ%d\n", irq);
+ if (irq < 0)
return -ENODEV;
- }
for (i = 0; i < NUM_EVENTS; i++) {
event_irq = irq_create_mapping(port->event_domain, i);
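
Same rationale as the altera-msi cleanup above: platform_get_irq() prints its own device-prefixed error, so the local dev_err() added no information. A common variant also propagates the helper's return code instead of a fixed -ENODEV, which preserves -EPROBE_DEFER:

	irq = platform_get_irq(pdev, 0);	/* logs its own error on failure */
	if (irq < 0)
		return irq;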
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index a728e8f9ad3c..765cf2b45e24 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -35,18 +35,12 @@
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- struct msi_controller chip;
- unsigned long pages;
- struct mutex lock;
+ struct mutex map_lock;
+ spinlock_t mask_lock;
int irq1;
int irq2;
};
-static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
-{
- return container_of(chip, struct rcar_msi, chip);
-}
-
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -56,6 +50,11 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
+static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
+{
+ return container_of(msi, struct rcar_pcie_host, msi);
+}
+
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
unsigned int shift = BITS_PER_BYTE * (where & 3);
@@ -292,8 +291,6 @@ static int rcar_pcie_enable(struct rcar_pcie_host *host)
bridge->sysdata = host;
bridge->ops = &rcar_pcie_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- bridge->msi = &host->msi.chip;
return pci_host_probe(bridge);
}
@@ -473,42 +470,6 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
return err;
}
-static int rcar_msi_alloc(struct rcar_msi *chip)
-{
- int msi;
-
- mutex_lock(&chip->lock);
-
- msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
- if (msi < INT_PCI_MSI_NR)
- set_bit(msi, chip->used);
- else
- msi = -ENOSPC;
-
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
-{
- int msi;
-
- mutex_lock(&chip->lock);
- msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
- order_base_2(no_irqs));
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
-{
- mutex_lock(&chip->lock);
- clear_bit(irq, chip->used);
- mutex_unlock(&chip->lock);
-}
-
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
struct rcar_pcie_host *host = data;
@@ -527,18 +488,13 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
unsigned int msi_irq;
- /* clear the interrupt */
- rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
-
- msi_irq = irq_find_mapping(msi->domain, index);
+ msi_irq = irq_find_mapping(msi->domain->parent, index);
if (msi_irq) {
- if (test_bit(index, msi->used))
- generic_handle_irq(msi_irq);
- else
- dev_info(dev, "unhandled MSI\n");
+ generic_handle_irq(msi_irq);
} else {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
+ rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
}
/* see if there's any more pending in this vector */
@@ -548,149 +504,169 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
- struct msi_desc *desc)
+static void rcar_msi_top_irq_ack(struct irq_data *d)
{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
- msi.chip);
- struct rcar_pcie *pcie = &host->pcie;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
+ irq_chip_ack_parent(d);
+}
- hwirq = rcar_msi_alloc(msi);
- if (hwirq < 0)
- return hwirq;
+static void rcar_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq) {
- rcar_msi_free(msi, hwirq);
- return -EINVAL;
- }
+static void rcar_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
- irq_set_msi_desc(irq, desc);
+static struct irq_chip rcar_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_ack = rcar_msi_top_irq_ack,
+ .irq_mask = rcar_msi_top_irq_mask,
+ .irq_unmask = rcar_msi_top_irq_unmask,
+};
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
+static void rcar_msi_irq_ack(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- pci_write_msi_msg(irq, &msg);
+ /* clear the interrupt */
+ rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
+}
- return 0;
+static void rcar_msi_irq_mask(struct irq_data *d)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int rcar_msi_setup_irqs(struct msi_controller *chip,
- struct pci_dev *pdev, int nvec, int type)
+static void rcar_msi_irq_unmask(struct irq_data *d)
{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
- msi.chip);
- struct rcar_pcie *pcie = &host->pcie;
- struct msi_desc *desc;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- int i;
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ spin_unlock_irqrestore(&msi->mask_lock, flags);
+}
- /* MSI-X interrupts are not supported */
- if (type == PCI_CAP_ID_MSIX)
- return -EINVAL;
+static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
- WARN_ON(!list_is_singular(&pdev->dev.msi_list));
- desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- hwirq = rcar_msi_alloc_region(msi, nvec);
- if (hwirq < 0)
- return -ENOSPC;
+ msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg->data = data->hwirq;
+}
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq)
- return -ENOSPC;
+static struct irq_chip rcar_msi_bottom_chip = {
+ .name = "Rcar MSI",
+ .irq_ack = rcar_msi_irq_ack,
+ .irq_mask = rcar_msi_irq_mask,
+ .irq_unmask = rcar_msi_irq_unmask,
+ .irq_set_affinity = rcar_msi_set_affinity,
+ .irq_compose_msi_msg = rcar_compose_msi_msg,
+};
- for (i = 0; i < nvec; i++) {
- /*
- * irq_create_mapping() called from rcar_pcie_probe() pre-
- * allocates descs, so there is no need to allocate descs here.
- * We can therefore assume that if irq_find_mapping() above
- * returns non-zero, then the descs are also successfully
- * allocated.
- */
- if (irq_set_msi_desc_off(irq, i, desc)) {
- /* TODO: clear */
- return -EINVAL;
- }
- }
+static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct rcar_msi *msi = domain->host_data;
+ unsigned int i;
+ int hwirq;
- desc->nvec_used = nvec;
- desc->msi_attrib.multiple = order_base_2(nvec);
+ mutex_lock(&msi->map_lock);
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
+ hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
- pci_write_msi_msg(irq, &msg);
+ mutex_unlock(&msi->map_lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &rcar_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
return 0;
}
-static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct irq_data *d = irq_get_irq_data(irq);
-
- rcar_msi_free(msi, d->hwirq);
-}
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct rcar_msi *msi = domain->host_data;
-static struct irq_chip rcar_msi_irq_chip = {
- .name = "R-Car PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+ mutex_lock(&msi->map_lock);
-static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
- return 0;
+ mutex_unlock(&msi->map_lock);
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = rcar_msi_map,
+static const struct irq_domain_ops rcar_msi_domain_ops = {
+ .alloc = rcar_msi_domain_alloc,
+ .free = rcar_msi_domain_free,
+};
+
+static struct msi_domain_info rcar_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &rcar_msi_top_chip,
};
-static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
+static int rcar_allocate_domains(struct rcar_msi *msi)
{
- struct rcar_msi *msi = &host->msi;
- int i, irq;
+ struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
+
+ parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
+ &rcar_msi_domain_ops, msi);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
- for (i = 0; i < INT_PCI_MSI_NR; i++) {
- irq = irq_find_mapping(msi->domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
+ msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ if (!msi->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
}
- irq_domain_remove(msi->domain);
+ return 0;
}
-static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
+static void rcar_free_domains(struct rcar_msi *msi)
{
- struct rcar_pcie *pcie = &host->pcie;
- struct rcar_msi *msi = &host->msi;
- unsigned long base;
-
- /* setup MSI data target */
- base = virt_to_phys((void *)msi->pages);
+ struct irq_domain *parent = msi->domain->parent;
- rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
-
- /* enable all MSI interrupts */
- rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+ irq_domain_remove(msi->domain);
+ irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
@@ -698,29 +674,24 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
struct rcar_pcie *pcie = &host->pcie;
struct device *dev = pcie->dev;
struct rcar_msi *msi = &host->msi;
- int err, i;
-
- mutex_init(&msi->lock);
+ struct resource res;
+ int err;
- msi->chip.dev = dev;
- msi->chip.setup_irq = rcar_msi_setup_irq;
- msi->chip.setup_irqs = rcar_msi_setup_irqs;
- msi->chip.teardown_irq = rcar_msi_teardown_irq;
+ mutex_init(&msi->map_lock);
+ spin_lock_init(&msi->mask_lock);
- msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
- &msi_domain_ops, &msi->chip);
- if (!msi->domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
- for (i = 0; i < INT_PCI_MSI_NR; i++)
- irq_create_mapping(msi->domain, i);
+ err = rcar_allocate_domains(msi);
+ if (err)
+ return err;
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, host);
+ rcar_msi_bottom_chip.name, host);
if (err < 0) {
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
@@ -728,27 +699,32 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, host);
+ rcar_msi_bottom_chip.name, host);
if (err < 0) {
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
- /* setup MSI data target */
- msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
- rcar_pcie_hw_enable_msi(host);
+ /* disable all MSIs */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /*
+ * Setup MSI data target using the RC base address, which is
+ * guaranteed to be in the low 32-bit range on any R-Car hardware.
+ */
+ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
return 0;
err:
- rcar_pcie_unmap_msi(host);
+ rcar_free_domains(msi);
return err;
}
static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
{
struct rcar_pcie *pcie = &host->pcie;
- struct rcar_msi *msi = &host->msi;
/* Disable all MSI interrupts */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
@@ -756,9 +732,7 @@ static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
/* Disable address decoding of the MSI interrupt, MSIFE */
rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
- free_pages(msi->pages, 0);
-
- rcar_pcie_unmap_msi(host);
+ rcar_free_domains(&host->msi);
}
static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
@@ -1011,8 +985,17 @@ static int __maybe_unused rcar_pcie_resume(struct device *dev)
dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
/* Enable MSI */
- if (IS_ENABLED(CONFIG_PCI_MSI))
- rcar_pcie_hw_enable_msi(host);
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ struct resource res;
+ u32 val;
+
+ of_address_to_resource(dev->of_node, 0, &res);
+ rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
+ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
+
+ bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
+ rcar_pci_write_reg(pcie, val, PCIEMSIIER);
+ }
rcar_pcie_hw_enable(host);
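
For orientation: the rework above replaces the old msi_controller wiring with the generic two-level MSI scheme, an inner linear irq_domain that hands out hwirqs stacked under a PCI/MSI domain that speaks the MSI protocol. A minimal sketch of just that scaffolding, with all my_* names purely illustrative and not part of this patch:

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/property.h>

#define MY_NUM_MSI 32

struct my_msi {
        struct irq_domain *domain;      /* outer PCI/MSI domain */
};

static const struct irq_domain_ops my_domain_ops;       /* .alloc/.free live here */
static struct msi_domain_info my_msi_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
};

static int my_allocate_domains(struct device *dev, struct my_msi *msi)
{
        struct fwnode_handle *fwnode = dev_fwnode(dev);
        struct irq_domain *parent;

        /* Inner domain: maps controller hwirq numbers to Linux IRQs */
        parent = irq_domain_create_linear(fwnode, MY_NUM_MSI,
                                          &my_domain_ops, msi);
        if (!parent)
                return -ENOMEM;
        irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

        /* Outer domain: implements the PCI/MSI protocol on top of it */
        msi->domain = pci_msi_create_irq_domain(fwnode, &my_msi_info, parent);
        if (!msi->domain) {
                irq_domain_remove(parent);
                return -ENOMEM;
        }
        return 0;
}

Teardown mirrors this in reverse, as rcar_free_domains() shows: remove the outer domain first, then its parent.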
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 07e36661bbc2..8689311c5ef6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -26,6 +26,7 @@
/* Bridge core config registers */
#define BRCFG_PCIE_RX0 0x00000000
+#define BRCFG_PCIE_RX1 0x00000004
#define BRCFG_INTERRUPT 0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020
@@ -128,6 +129,7 @@
#define NWL_ECAM_VALUE_DEFAULT 12
#define CFG_DMA_REG_BAR GENMASK(2, 0)
+#define CFG_PCIE_CACHE GENMASK(7, 0)
#define INT_PCI_MSI_NR (2 * 32)
@@ -675,6 +677,11 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
BRCFG_PCIE_RX_MSG_FILTER);
+ /* This routes the PCIe DMA traffic through the CCI path */
+ if (of_dma_is_coherent(dev->of_node))
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX1) |
+ CFG_PCIE_CACHE, BRCFG_PCIE_RX1);
+
err = nwl_wait_for_link(pcie);
if (err)
return err;
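
The CCI hunk above is a conditional read-modify-write on a bridge register. As a standalone sketch, where the MY_* names are placeholders for the BRCFG_PCIE_RX1/CFG_PCIE_CACHE pair rather than a new API:

#include <linux/io.h>
#include <linux/of.h>

#define MY_BRCFG_PCIE_RX1       0x00000004      /* placeholder offset */
#define MY_CFG_PCIE_CACHE       0xff            /* placeholder field mask */

static void my_enable_coherent_path(struct device_node *node,
                                    void __iomem *base)
{
        u32 val;

        /* Only reroute DMA through the CCI when DT marks the port coherent */
        if (!of_dma_is_coherent(node))
                return;

        val = readl(base + MY_BRCFG_PCIE_RX1);
        writel(val | MY_CFG_PCIE_CACHE, base + MY_BRCFG_PCIE_RX1);
}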
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index fa5baeb82653..14001febf59a 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -93,25 +93,23 @@
/**
* struct xilinx_pcie_port - PCIe port information
* @reg_base: IO Mapped Register Base
- * @irq: Interrupt number
- * @msi_pages: MSI pages
* @dev: Device pointer
+ * @msi_map: Bitmap of allocated MSIs
+ * @map_lock: Mutex protecting the MSI allocation
* @msi_domain: MSI IRQ domain pointer
* @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources
*/
struct xilinx_pcie_port {
void __iomem *reg_base;
- u32 irq;
- unsigned long msi_pages;
struct device *dev;
+ unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)];
+ struct mutex map_lock;
struct irq_domain *msi_domain;
struct irq_domain *leg_domain;
struct list_head resources;
};
-static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
-
static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
return readl(port->reg_base + reg);
@@ -196,151 +194,118 @@ static struct pci_ops xilinx_pcie_ops = {
/* MSI functions */
-/**
- * xilinx_pcie_destroy_msi - Free MSI number
- * @irq: IRQ to be freed
- */
-static void xilinx_pcie_destroy_msi(unsigned int irq)
+static void xilinx_msi_top_irq_ack(struct irq_data *d)
{
- struct msi_desc *msi;
- struct xilinx_pcie_port *port;
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
-
- if (!test_bit(hwirq, msi_irq_in_use)) {
- msi = irq_get_msi_desc(irq);
- port = msi_desc_to_pci_sysdata(msi);
- dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
- } else {
- clear_bit(hwirq, msi_irq_in_use);
- }
+ /*
+ * xilinx_pcie_intr_handler() will have performed the Ack.
+ * Eventually, this should be fixed and the Ack moved into
+ * the respective callbacks for INTx and MSI.
+ */
}
-/**
- * xilinx_pcie_assign_msi - Allocate MSI number
- *
- * Return: A valid IRQ on success and error value on failure.
- */
-static int xilinx_pcie_assign_msi(void)
-{
- int pos;
-
- pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
- if (pos < XILINX_NUM_MSI_IRQS)
- set_bit(pos, msi_irq_in_use);
- else
- return -ENOSPC;
+static struct irq_chip xilinx_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_ack = xilinx_msi_top_irq_ack,
+};
- return pos;
+static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
}
-/**
- * xilinx_msi_teardown_irq - Destroy the MSI
- * @chip: MSI Chip descriptor
- * @irq: MSI IRQ to destroy
- */
-static void xilinx_msi_teardown_irq(struct msi_controller *chip,
- unsigned int irq)
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- xilinx_pcie_destroy_msi(irq);
- irq_dispose_mapping(irq);
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
+
+ msg->address_lo = lower_32_bits(pa);
+ msg->address_hi = upper_32_bits(pa);
+ msg->data = data->hwirq;
}
-/**
- * xilinx_pcie_msi_setup_irq - Setup MSI request
- * @chip: MSI chip pointer
- * @pdev: PCIe device pointer
- * @desc: MSI descriptor pointer
- *
- * Return: '0' on success and error value on failure
- */
-static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev,
- struct msi_desc *desc)
-{
- struct xilinx_pcie_port *port = pdev->bus->sysdata;
- unsigned int irq;
- int hwirq;
- struct msi_msg msg;
- phys_addr_t msg_addr;
+static struct irq_chip xilinx_msi_bottom_chip = {
+ .name = "Xilinx MSI",
+ .irq_set_affinity = xilinx_msi_set_affinity,
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+};
- hwirq = xilinx_pcie_assign_msi();
- if (hwirq < 0)
- return hwirq;
+static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xilinx_pcie_port *port = domain->host_data;
+ int hwirq, i;
- irq = irq_create_mapping(port->msi_domain, hwirq);
- if (!irq)
- return -EINVAL;
+ mutex_lock(&port->map_lock);
- irq_set_msi_desc(irq, desc);
+ hwirq = bitmap_find_free_region(port->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
- msg_addr = virt_to_phys((void *)port->msi_pages);
+ mutex_unlock(&port->map_lock);
- msg.address_hi = 0;
- msg.address_lo = msg_addr;
- msg.data = irq;
+ if (hwirq < 0)
+ return -ENOSPC;
- pci_write_msi_msg(irq, &msg);
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &xilinx_msi_bottom_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
return 0;
}
-/* MSI Chip Descriptor */
-static struct msi_controller xilinx_pcie_msi_chip = {
- .setup_irq = xilinx_pcie_msi_setup_irq,
- .teardown_irq = xilinx_msi_teardown_irq,
-};
+static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct xilinx_pcie_port *port = domain->host_data;
-/* HW Interrupt Chip Descriptor */
-static struct irq_chip xilinx_msi_irq_chip = {
- .name = "Xilinx PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+ mutex_lock(&port->map_lock);
-/**
- * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
- * @domain: IRQ domain
- * @irq: Virtual IRQ number
- * @hwirq: HW interrupt number
- *
- * Return: Always returns 0.
- */
-static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ bitmap_release_region(port->msi_map, d->hwirq, order_base_2(nr_irqs));
- return 0;
+ mutex_unlock(&port->map_lock);
}
-/* IRQ Domain operations */
-static const struct irq_domain_ops msi_domain_ops = {
- .map = xilinx_pcie_msi_map,
+static const struct irq_domain_ops xilinx_msi_domain_ops = {
+ .alloc = xilinx_msi_domain_alloc,
+ .free = xilinx_msi_domain_free,
};
-/**
- * xilinx_pcie_enable_msi - Enable MSI support
- * @port: PCIe port information
- */
-static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+static struct msi_domain_info xilinx_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .chip = &xilinx_msi_top_chip,
+};
+
+static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie)
{
- phys_addr_t msg_addr;
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain *parent;
- port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
- if (!port->msi_pages)
+ parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
+ &xilinx_msi_domain_ops, pcie);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
- msg_addr = virt_to_phys((void *)port->msi_pages);
- pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
- pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+ pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ if (!pcie->msi_domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
return 0;
}
+static void xilinx_free_msi_domains(struct xilinx_pcie_port *pcie)
+{
+ struct irq_domain *parent = pcie->msi_domain->parent;
+
+ irq_domain_remove(pcie->msi_domain);
+ irq_domain_remove(parent);
+}
+
/* INTx Functions */
/**
@@ -420,6 +385,8 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
+ unsigned int irq;
+
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
/* Check whether interrupt valid */
@@ -432,20 +399,19 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
+ irq = irq_find_mapping(port->msi_domain->parent, val);
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- val = irq_find_mapping(port->leg_domain, val);
+ irq = irq_find_mapping(port->leg_domain, val);
}
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
- /* Handle the interrupt */
- if (IS_ENABLED(CONFIG_PCI_MSI) ||
- !(val & XILINX_PCIE_RPIFR1_MSI_INTR))
- generic_handle_irq(val);
+ if (irq)
+ generic_handle_irq(irq);
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
@@ -491,12 +457,11 @@ error:
static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
int ret;
/* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
+ pcie_intc_node = of_get_next_child(dev->of_node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
return -ENODEV;
@@ -513,18 +478,14 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->msi_domain = irq_domain_add_linear(node,
- XILINX_NUM_MSI_IRQS,
- &msi_domain_ops,
- &xilinx_pcie_msi_chip);
- if (!port->msi_domain) {
- dev_err(dev, "Failed to get a MSI IRQ domain\n");
- return -ENODEV;
- }
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(port), SZ_4K);
- ret = xilinx_pcie_enable_msi(port);
+ ret = xilinx_allocate_msi_domains(port);
if (ret)
return ret;
+
+ pcie_write(port, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
}
return 0;
@@ -572,6 +533,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource regs;
+ unsigned int irq;
int err;
err = of_address_to_resource(node, 0, &regs);
@@ -584,12 +546,12 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
if (IS_ERR(port->reg_base))
return PTR_ERR(port->reg_base);
- port->irq = irq_of_parse_and_map(node, 0);
- err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+ irq = irq_of_parse_and_map(node, 0);
+ err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"xilinx-pcie", port);
if (err) {
- dev_err(dev, "unable to request irq %d\n", port->irq);
+ dev_err(dev, "unable to request irq %d\n", irq);
return err;
}
@@ -617,7 +579,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return -ENODEV;
port = pci_host_bridge_priv(bridge);
-
+ mutex_init(&port->map_lock);
port->dev = dev;
err = xilinx_pcie_parse_dt(port);
@@ -637,11 +599,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
bridge->sysdata = port;
bridge->ops = &xilinx_pcie_ops;
-#ifdef CONFIG_PCI_MSI
- xilinx_pcie_msi_chip.dev = dev;
- bridge->msi = &xilinx_pcie_msi_chip;
-#endif
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (err)
+ xilinx_free_msi_domains(port);
+
+ return err;
}
static const struct of_device_id xilinx_pcie_of_match[] = {
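
The new .alloc/.free callbacks reduce to a small power-of-two bitmap allocator: multi-vector MSI needs a contiguous, naturally aligned hwirq block, hence bitmap_find_free_region() with order_base_2(). A standalone sketch, with MY_NUM_MSI and the my_* names purely illustrative:

#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/mutex.h>

#define MY_NUM_MSI 32

static DEFINE_MUTEX(my_map_lock);
static unsigned long my_msi_map[BITS_TO_LONGS(MY_NUM_MSI)];

static int my_alloc_hwirqs(unsigned int nr_irqs)
{
        int hwirq;

        mutex_lock(&my_map_lock);
        /* Multi-MSI requires a naturally aligned power-of-two block */
        hwirq = bitmap_find_free_region(my_msi_map, MY_NUM_MSI,
                                        order_base_2(nr_irqs));
        mutex_unlock(&my_map_lock);

        return hwirq;   /* >= 0 on success, negative if the map is full */
}

static void my_free_hwirqs(int hwirq, unsigned int nr_irqs)
{
        mutex_lock(&my_map_lock);
        bitmap_release_region(my_msi_map, hwirq, order_base_2(nr_irqs));
        mutex_unlock(&my_map_lock);
}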
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 5e80f28f0119..e3fcdfec58b3 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -28,6 +28,7 @@
#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
#define PCI_REG_VMCONFIG 0x44
#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
+#define VMCONFIG_MSI_REMAP 0x2
#define PCI_REG_VMLOCK 0x70
#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
@@ -59,6 +60,13 @@ enum vmd_features {
* be used for MSI remapping
*/
VMD_FEAT_OFFSET_FIRST_VECTOR = (1 << 3),
+
+ /*
+ * Device can bypass remapping MSI-X transactions into its MSI-X table,
+ * avoiding the requirement of a VMD MSI domain for child device
+ * interrupt handling.
+ */
+ VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4),
};
/*
@@ -306,6 +314,16 @@ static struct msi_domain_info vmd_msi_domain_info = {
.chip = &vmd_msi_controller,
};
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
struct fwnode_handle *fn;
@@ -325,6 +343,13 @@ static int vmd_create_irq_domain(struct vmd_dev *vmd)
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
+ /*
+ * Some production BIOSes won't enable remapping between soft reboots.
+ * Ensure remapping is restored before unloading the driver.
+ */
+ if (!vmd->msix_count)
+ vmd_set_msi_remapping(vmd, true);
+
if (vmd->irq_domain) {
struct fwnode_handle *fn = vmd->irq_domain->fwnode;
@@ -679,15 +704,32 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
sd->node = pcibus_to_node(vmd->dev->bus);
- ret = vmd_create_irq_domain(vmd);
- if (ret)
- return ret;
-
/*
- * Override the irq domain bus token so the domain can be distinguished
- * from a regular PCI/MSI domain.
+ * Currently MSI remapping must be enabled in guest passthrough mode
+ * due to some missing interrupt remapping plumbing. This is probably
+ * acceptable because the guest is usually CPU-limited and MSI
+ * remapping doesn't become a performance bottleneck.
*/
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
+ if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
+ offset[0] || offset[1]) {
+ ret = vmd_alloc_irqs(vmd);
+ if (ret)
+ return ret;
+
+ vmd_set_msi_remapping(vmd, true);
+
+ ret = vmd_create_irq_domain(vmd);
+ if (ret)
+ return ret;
+
+ /*
+ * Override the IRQ domain bus token so the domain can be
+ * distinguished from a regular PCI/MSI domain.
+ */
+ irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
+ } else {
+ vmd_set_msi_remapping(vmd, false);
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -753,10 +795,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- err = vmd_alloc_irqs(vmd);
- if (err)
- return err;
-
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
@@ -825,7 +863,8 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
- VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
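
One subtlety in the new vmd_set_msi_remapping() is the inverted register sense: setting the VMCONFIG_MSI_REMAP bit disables remapping. A hedged standalone sketch of the toggle, where the MY_* constants mirror the values above but are local to this example:

#include <linux/pci.h>

#define MY_PCI_REG_VMCONFIG     0x44
#define MY_VMCONFIG_MSI_REMAP   0x2

static void my_set_msi_remapping(struct pci_dev *dev, bool enable)
{
        u16 reg;

        pci_read_config_word(dev, MY_PCI_REG_VMCONFIG, &reg);
        /* Bit set means remapping bypassed, so clear it to enable */
        reg = enable ? (reg & ~MY_VMCONFIG_MSI_REMAP)
                     : (reg | MY_VMCONFIG_MSI_REMAP);
        pci_write_config_word(dev, MY_PCI_REG_VMCONFIG, reg);
}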
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 338148cf56f5..bce274d02dcf 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Endpoint Function Driver to implement Non-Transparent Bridge functionality
*
* Copyright (C) 2020 Texas Instruments
@@ -696,7 +696,8 @@ reset_handler:
/**
* epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
*
*+-----------------+------->+------------------+ +-----------------+
*| BAR0 | | CONFIG REGION | | BAR0 |
@@ -740,6 +741,7 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
/**
* epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
*
*+-----------------+------->+------------------+ +-----------------+
*| BAR0 | | CONFIG REGION | | BAR0 |
@@ -808,7 +810,8 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
/**
* epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
@@ -851,7 +854,8 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
/**
* epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
*
* +-----------------+------->+------------------+ +-----------------+
* | BAR0 | | CONFIG REGION | | BAR0 |
@@ -1312,6 +1316,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
/**
* epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space
+ * @dev: The PCI device.
* @ntb_epc: EPC associated with one of the HOST whose BAR holds peer's outbound
* address
* @bar: BAR of @ntb_epc in for which memory has to be allocated (could be
@@ -1660,7 +1665,6 @@ static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
* epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
* constructs (scratchpad region, doorbell, memorywindow)
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
- * @type: PRIMARY interface or SECONDARY interface
*
* Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs
* to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
@@ -2037,6 +2041,8 @@ static const struct config_item_type ntb_group_type = {
/**
* epf_ntb_add_cfs() - Add configfs directory specific to NTB
* @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
*
* Add configfs directory specific to NTB. This directory will hold
* NTB specific properties like db_count, spad_count, num_mws etc.,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index c0ac4e9cbe72..d2708ca4bece 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Test driver to test endpoint functionality
*
* Copyright (C) 2017 Texas Instruments
@@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EINVAL;
epc_features = pci_epc_get_features(epc, epf->func_no);
- if (epc_features) {
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
- test_reg_bar = pci_epc_get_first_free_bar(epc_features);
- if (test_reg_bar < 0)
- return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
+ if (!epc_features) {
+ dev_err(&epf->dev, "epc_features not implemented\n");
+ return -EOPNOTSUPP;
}
+ linkup_notifier = epc_features->linkup_notifier;
+ core_init_notifier = epc_features->core_init_notifier;
+ test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+ if (test_reg_bar < 0)
+ return -EINVAL;
+ pci_epf_configure_bar(epf, epc_features);
+
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
ret = pci_epf_register_driver(&test_driver);
if (ret) {
+ destroy_workqueue(kpcitest_workqueue);
pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
@@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
static void __exit pci_epf_test_exit(void)
{
+ if (kpcitest_workqueue)
+ destroy_workqueue(kpcitest_workqueue);
pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
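
The pci-epf-test fix applies the usual unwind rule: whatever init allocated before a failed registration must be torn down on that failure path, and again on module exit. A minimal sketch under those assumptions, where my_register_driver()/my_unregister_driver() are illustrative stand-ins for the real registration calls:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_register_driver(void) { return 0; }      /* illustrative stub */
static void my_unregister_driver(void) { }             /* illustrative stub */

static int __init my_init(void)
{
        int ret;

        my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;

        ret = my_register_driver();
        if (ret)
                destroy_workqueue(my_wq);       /* don't leak on failure */

        return ret;
}
module_init(my_init);

static void __exit my_exit(void)
{
        destroy_workqueue(my_wq);
        my_unregister_driver();
}
module_exit(my_exit);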
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index cc8f9eb2b177..adec9bee72cf 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -594,6 +594,8 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
* pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
* @epc: the EPC device from which the endpoint function should be removed
* @epf: the endpoint function to be removed
+ * @type: identifies whether the EPC is connected to the primary or secondary
+ * interface of the EPF
*
* Invoke to remove PCI endpoint function from the endpoint controller.
*/
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 7646c8660d42..e9289d10f822 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
enum pci_epc_interface_type type)
{
- struct device *dev = epf->epc->dev.parent;
+ struct device *dev;
struct pci_epf_bar *epf_bar;
struct pci_epc *epc;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2750a64cecd3..4fedebf2f8c1 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -157,7 +157,7 @@ static int pcihp_is_ejectable(acpi_handle handle)
}
/**
- * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot
+ * acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot
* @pbus: the PCI bus of the PCI slot corresponding to 'handle'
* @handle: ACPI handle to check
*
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index a74b274a8c45..1f8ab4377ad8 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -148,8 +148,7 @@ static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_h
* ACPI has no generic method of setting/getting attention status
* this allows for device specific driver registration
*/
-struct acpiphp_attention_info
-{
+struct acpiphp_attention_info {
int (*set_attn)(struct hotplug_slot *slot, u8 status);
int (*get_attn)(struct hotplug_slot *slot, u8 *status);
struct module *owner;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3365c93abf0e..f031302ad401 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
slot->flags &= ~SLOT_ENABLED;
continue;
}
+ pci_dev_put(dev);
}
}
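
The acpiphp change above plugs a reference leak: pci_get_slot() returns its pci_dev with an elevated refcount, so every successful lookup needs a matching pci_dev_put(). In isolation, with my_walk_slot() as an illustrative caller:

#include <linux/pci.h>

static void my_walk_slot(struct pci_bus *bus, unsigned int devfn)
{
        struct pci_dev *dev = pci_get_slot(bus, devfn);

        if (!dev)
                return;

        /* ... use dev ... */

        pci_dev_put(dev);       /* drop the reference taken by pci_get_slot() */
}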
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 00cd2b43364f..7a65d427ac11 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -80,7 +80,7 @@ static u8 evbuffer[1024];
static void __iomem *compaq_int15_entry_point;
/* lock for ordering int15_bios_call() */
-static spinlock_t int15_lock;
+static DEFINE_SPINLOCK(int15_lock);
/* This is a series of function that deals with
@@ -415,9 +415,6 @@ void compaq_nvram_init(void __iomem *rom_start)
compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
dbg("int15 entry = %p\n", compaq_int15_entry_point);
-
- /* initialize our int15 lock */
- spin_lock_init(&int15_lock);
}
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index db047284c291..9e3b27744305 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -174,11 +174,6 @@ static inline u8 shpc_readb(struct controller *ctrl, int reg)
return readb(ctrl->creg + reg);
}
-static inline void shpc_writeb(struct controller *ctrl, int reg, u8 val)
-{
- writeb(val, ctrl->creg + reg);
-}
-
static inline u16 shpc_readw(struct controller *ctrl, int reg)
{
return readw(ctrl->creg + reg);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3162f88fe940..217dc9f0231f 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -64,39 +64,18 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
/* Arch hooks */
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
- struct msi_controller *chip = dev->bus->msi;
- int err;
-
- if (!chip || !chip->setup_irq)
- return -EINVAL;
-
- err = chip->setup_irq(chip, dev, desc);
- if (err < 0)
- return err;
-
- irq_set_chip_data(desc->irq, chip);
-
- return 0;
+ return -EINVAL;
}
void __weak arch_teardown_msi_irq(unsigned int irq)
{
- struct msi_controller *chip = irq_get_chip_data(irq);
-
- if (!chip || !chip->teardown_irq)
- return;
-
- chip->teardown_irq(chip, irq);
}
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- struct msi_controller *chip = dev->bus->msi;
struct msi_desc *entry;
int ret;
- if (chip && chip->setup_irqs)
- return chip->setup_irqs(chip, dev, nvec, type);
/*
* If an architecture wants to support multiple MSI, it needs to
* override arch_setup_msi_irqs()
@@ -115,11 +94,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 0;
}
-/*
- * We have a default implementation available as a separate non-weak
- * function, as it is used by the Xen x86 PCI code
- */
-void default_teardown_msi_irqs(struct pci_dev *dev)
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
int i;
struct msi_desc *entry;
@@ -129,11 +104,6 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
for (i = 0; i < entry->nvec_used; i++)
arch_teardown_msi_irq(entry->irq + i);
}
-
-void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- return default_teardown_msi_irqs(dev);
-}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
static void default_restore_msi_irq(struct pci_dev *dev, int irq)
@@ -901,8 +871,15 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec)
* Any bridge which does NOT route MSI transactions from its
* secondary bus to its primary bus must set NO_MSI flag on
* the secondary pci_bus.
- * We expect only arch-specific PCI host bus controller driver
- * or quirks for specific PCI bridges to be setting NO_MSI.
+ *
+ * The NO_MSI flag can either be set directly by:
+ * - arch-specific PCI host bus controller drivers (deprecated)
+ * - quirks for specific PCI bridges
+ *
+ * or indirectly by platform-specific PCI host bridge drivers by
+ * advertising the 'msi_domain' property, which results in
+ * the NO_MSI flag when no MSI domain is found for this bridge
+ * at probe time.
*/
for (bus = dev->bus; bus; bus = bus->parent)
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 5ea472ae22ac..da5b414d585a 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -190,10 +190,18 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
- * This function will try to obtain the host bridge domain number by
- * finding a property called "linux,pci-domain" of the given device node.
+ * of_get_pci_domain_nr - Find the host bridge domain number
+ * of the given device node.
+ * @node: Device tree node with the domain information.
*
- * @node: device tree node with the domain information
+ * This function will try to obtain the host bridge domain number by finding
+ * a property called "linux,pci-domain" of the given device node.
+ *
+ * Return:
+ * * > 0 - On success, an associated domain number.
+ * * -EINVAL - The property "linux,pci-domain" does not exist.
+ * * -ENODATA - The "linux,pci-domain" property does not have a value.
+ * * -EOVERFLOW - Invalid "linux,pci-domain" property value.
*
* Returns the associated domain number from DT in the range [0-0xffff], or
* a negative value if the required property is not found.
@@ -585,10 +593,16 @@ int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
#endif /* CONFIG_PCI */
/**
+ * of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
+ * @node: Device tree node with the maximum link speed information.
+ *
* This function will try to find the limitation of link speed by finding
* a property called "max-link-speed" of the given device node.
*
- * @node: device tree node with the max link speed information
+ * Return:
+ * * > 0 - On success, a maximum link speed.
+ * * -EINVAL - Invalid "max-link-speed" property value, or failure to access
+ * the property of the device tree node.
*
* Returns the associated max link speed from DT, or a negative value if the
* required property is not found or is invalid.
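
The Return values documented above map directly onto of_property_read_u32() error codes. A minimal sketch of the lookup, assuming nothing beyond that helper:

#include <linux/of.h>

static int my_get_pci_domain_nr(struct device_node *node)
{
        u32 domain;
        int ret;

        /*
         * of_property_read_u32() returns -EINVAL when the property is
         * absent, -ENODATA when it has no value and -EOVERFLOW when the
         * value cannot be represented: exactly the codes listed above.
         */
        ret = of_property_read_u32(node, "linux,pci-domain", &domain);
        if (ret)
                return ret;

        return (int)domain;
}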
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 53502a751914..36bc23e21759 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1021,7 +1021,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (!error)
pci_dbg(dev, "power state changed by ACPI to %s\n",
- acpi_power_state_string(state_conv[state]));
+ acpi_power_state_string(adev->power.state));
return error;
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 781e45cf60d1..c32f3b7540e8 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -33,6 +33,21 @@
#include <linux/pci-acpi.h>
#include "pci.h"
+static bool device_has_acpi_name(struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_handle handle = ACPI_HANDLE(dev);
+
+ if (!handle)
+ return false;
+
+ return acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
+ 1 << DSM_PCI_DEVICE_NAME);
+#else
+ return false;
+#endif
+}
+
#ifdef CONFIG_DMI
enum smbios_attr_enum {
SMBIOS_ATTR_NONE = 0,
@@ -45,13 +60,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
{
const struct dmi_device *dmi;
struct dmi_dev_onboard *donboard;
- int domain_nr;
- int bus;
- int devfn;
-
- domain_nr = pci_domain_nr(pdev->bus);
- bus = pdev->bus->number;
- devfn = pdev->devfn;
+ int domain_nr = pci_domain_nr(pdev->bus);
+ int bus = pdev->bus->number;
+ int devfn = pdev->devfn;
dmi = NULL;
while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
@@ -62,13 +73,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
donboard->devfn == devfn) {
if (buf) {
if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
- return scnprintf(buf, PAGE_SIZE,
- "%d\n",
- donboard->instance);
+ return sysfs_emit(buf, "%d\n",
+ donboard->instance);
else if (attribute == SMBIOS_ATTR_LABEL_SHOW)
- return scnprintf(buf, PAGE_SIZE,
- "%s\n",
- dmi->name);
+ return sysfs_emit(buf, "%s\n",
+ dmi->name);
}
return strlen(dmi->name);
}
@@ -76,78 +85,52 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static umode_t smbios_instance_string_exist(struct kobject *kobj,
- struct attribute *attr, int n)
+static ssize_t smbios_label_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct device *dev;
- struct pci_dev *pdev;
-
- dev = kobj_to_dev(kobj);
- pdev = to_pci_dev(dev);
-
- return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ?
- S_IRUGO : 0;
-}
-
-static ssize_t smbioslabel_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev;
- pdev = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
return find_smbios_instance_string(pdev, buf,
SMBIOS_ATTR_LABEL_SHOW);
}
+static struct device_attribute dev_attr_smbios_label = __ATTR(label, 0444,
+ smbios_label_show, NULL);
-static ssize_t smbiosinstance_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t index_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct pci_dev *pdev;
- pdev = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
return find_smbios_instance_string(pdev, buf,
SMBIOS_ATTR_INSTANCE_SHOW);
}
+static DEVICE_ATTR_RO(index);
-static struct device_attribute smbios_attr_label = {
- .attr = {.name = "label", .mode = 0444},
- .show = smbioslabel_show,
-};
-
-static struct device_attribute smbios_attr_instance = {
- .attr = {.name = "index", .mode = 0444},
- .show = smbiosinstance_show,
-};
-
-static struct attribute *smbios_attributes[] = {
- &smbios_attr_label.attr,
- &smbios_attr_instance.attr,
+static struct attribute *smbios_attrs[] = {
+ &dev_attr_smbios_label.attr,
+ &dev_attr_index.attr,
NULL,
};
-static const struct attribute_group smbios_attr_group = {
- .attrs = smbios_attributes,
- .is_visible = smbios_instance_string_exist,
-};
-
-static int pci_create_smbiosname_file(struct pci_dev *pdev)
+static umode_t smbios_attr_is_visible(struct kobject *kobj, struct attribute *a,
+ int n)
{
- return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
-static void pci_remove_smbiosname_file(struct pci_dev *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
-}
-#else
-static inline int pci_create_smbiosname_file(struct pci_dev *pdev)
-{
- return -1;
-}
+ if (device_has_acpi_name(dev))
+ return 0;
-static inline void pci_remove_smbiosname_file(struct pci_dev *pdev)
-{
+ if (!find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE))
+ return 0;
+
+ return a->mode;
}
+
+const struct attribute_group pci_dev_smbios_attr_group = {
+ .attrs = smbios_attrs,
+ .is_visible = smbios_attr_is_visible,
+};
#endif
#ifdef CONFIG_ACPI
@@ -169,11 +152,10 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
static int dsm_get_label(struct device *dev, char *buf,
enum acpi_attr_enum attr)
{
- acpi_handle handle;
+ acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object *obj, *tmp;
int len = -1;
- handle = ACPI_HANDLE(dev);
if (!handle)
return -1;
@@ -209,103 +191,39 @@ static int dsm_get_label(struct device *dev, char *buf,
return len;
}
-static bool device_has_dsm(struct device *dev)
-{
- acpi_handle handle;
-
- handle = ACPI_HANDLE(dev);
- if (!handle)
- return false;
-
- return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
- 1 << DSM_PCI_DEVICE_NAME);
-}
-
-static umode_t acpi_index_string_exist(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev;
-
- dev = kobj_to_dev(kobj);
-
- if (device_has_dsm(dev))
- return S_IRUGO;
-
- return 0;
-}
-
-static ssize_t acpilabel_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
}
+static DEVICE_ATTR_RO(label);
-static ssize_t acpiindex_show(struct device *dev,
+static ssize_t acpi_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
}
+static DEVICE_ATTR_RO(acpi_index);
-static struct device_attribute acpi_attr_label = {
- .attr = {.name = "label", .mode = 0444},
- .show = acpilabel_show,
-};
-
-static struct device_attribute acpi_attr_index = {
- .attr = {.name = "acpi_index", .mode = 0444},
- .show = acpiindex_show,
-};
-
-static struct attribute *acpi_attributes[] = {
- &acpi_attr_label.attr,
- &acpi_attr_index.attr,
+static struct attribute *acpi_attrs[] = {
+ &dev_attr_label.attr,
+ &dev_attr_acpi_index.attr,
NULL,
};
-static const struct attribute_group acpi_attr_group = {
- .attrs = acpi_attributes,
- .is_visible = acpi_index_string_exist,
-};
-
-static int pci_create_acpi_index_label_files(struct pci_dev *pdev)
+static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *a,
+ int n)
{
- return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
-static int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
- return 0;
-}
-#else
-static inline int pci_create_acpi_index_label_files(struct pci_dev *pdev)
-{
- return -1;
-}
+ if (!device_has_acpi_name(dev))
+ return 0;
-static inline int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
-{
- return -1;
+ return a->mode;
}
-static inline bool device_has_dsm(struct device *dev)
-{
- return false;
-}
+const struct attribute_group pci_dev_acpi_attr_group = {
+ .attrs = acpi_attrs,
+ .is_visible = acpi_attr_is_visible,
+};
#endif
-
-void pci_create_firmware_label_files(struct pci_dev *pdev)
-{
- if (device_has_dsm(&pdev->dev))
- pci_create_acpi_index_label_files(pdev);
- else
- pci_create_smbiosname_file(pdev);
-}
-
-void pci_remove_firmware_label_files(struct pci_dev *pdev)
-{
- if (device_has_dsm(&pdev->dev))
- pci_remove_acpi_index_label_files(pdev);
- else
- pci_remove_smbiosname_file(pdev);
-}
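
The label/index conversion swaps imperative sysfs_create_group() calls for static attribute groups whose is_visible() callback decides visibility per device. The general shape, as a hedged sketch in which my_device_has_label() is an illustrative predicate standing in for the DMI/ACPI checks:

#include <linux/device.h>
#include <linux/sysfs.h>

static bool my_device_has_label(struct device *dev)
{
        return false;   /* illustrative predicate, e.g. a DMI or ACPI probe */
}

static ssize_t label_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sysfs_emit(buf, "example\n");    /* illustrative body */
}
static DEVICE_ATTR_RO(label);

static struct attribute *my_attrs[] = {
        &dev_attr_label.attr,
        NULL,
};

static umode_t my_attr_is_visible(struct kobject *kobj, struct attribute *a,
                                  int n)
{
        struct device *dev = kobj_to_dev(kobj);

        if (!my_device_has_label(dev))
                return 0;       /* hide the file on this device */

        return a->mode;         /* otherwise keep the declared 0444 mode */
}

static const struct attribute_group my_attr_group = {
        .attrs = my_attrs,
        .is_visible = my_attr_is_visible,
};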
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a6b8fbbba6d2..beb8d1f4fafe 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -39,7 +39,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
struct pci_dev *pdev; \
\
pdev = to_pci_dev(dev); \
- return sprintf(buf, format_string, pdev->field); \
+ return sysfs_emit(buf, format_string, pdev->field); \
} \
static DEVICE_ATTR_RO(field)
@@ -56,7 +56,7 @@ static ssize_t broken_parity_status_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->broken_parity_status);
+ return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}
static ssize_t broken_parity_status_store(struct device *dev,
@@ -129,7 +129,7 @@ static ssize_t power_state_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n", pci_power_name(pdev->current_state));
+ return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);
@@ -138,10 +138,10 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- char *str = buf;
int i;
int max;
resource_size_t start, end;
+ size_t len = 0;
if (pci_dev->subordinate)
max = DEVICE_COUNT_RESOURCE;
@@ -151,12 +151,12 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
pci_resource_to_user(pci_dev, i, res, &start, &end);
- str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
- (unsigned long long)start,
- (unsigned long long)end,
- (unsigned long long)res->flags);
+ len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
+ (unsigned long long)start,
+ (unsigned long long)end,
+ (unsigned long long)res->flags);
}
- return (str - buf);
+ return len;
}
static DEVICE_ATTR_RO(resource);
@@ -165,8 +165,8 @@ static ssize_t max_link_speed_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n",
- pci_speed_string(pcie_get_speed_cap(pdev)));
+ return sysfs_emit(buf, "%s\n",
+ pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);
@@ -175,7 +175,7 @@ static ssize_t max_link_width_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
+ return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);
@@ -193,7 +193,7 @@ static ssize_t current_link_speed_show(struct device *dev,
speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
- return sprintf(buf, "%s\n", pci_speed_string(speed));
+ return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);
@@ -208,7 +208,7 @@ static ssize_t current_link_width_show(struct device *dev,
if (err)
return -EINVAL;
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);
@@ -225,7 +225,7 @@ static ssize_t secondary_bus_number_show(struct device *dev,
if (err)
return -EINVAL;
- return sprintf(buf, "%u\n", sec_bus);
+ return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);
@@ -241,7 +241,7 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
if (err)
return -EINVAL;
- return sprintf(buf, "%u\n", sub_bus);
+ return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);
@@ -251,7 +251,7 @@ static ssize_t ari_enabled_show(struct device *dev,
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
+ return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);
@@ -260,11 +260,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
- pci_dev->vendor, pci_dev->device,
- pci_dev->subsystem_vendor, pci_dev->subsystem_device,
- (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
- (u8)(pci_dev->class));
+ return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+ (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);
@@ -302,7 +302,7 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
+ return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);
@@ -338,7 +338,7 @@ static ssize_t numa_node_store(struct device *dev,
static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev->numa_node);
+ return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif
@@ -348,7 +348,7 @@ static ssize_t dma_mask_bits_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
+ return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);
@@ -356,7 +356,7 @@ static ssize_t consistent_dma_mask_bits_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
+ return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);
@@ -366,9 +366,9 @@ static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_bus *subordinate = pdev->subordinate;
- return sprintf(buf, "%u\n", subordinate ?
- !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
- : !pdev->no_msi);
+ return sysfs_emit(buf, "%u\n", subordinate ?
+ !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+ : !pdev->no_msi);
}
static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
@@ -523,7 +523,7 @@ static ssize_t d3cold_allowed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->d3cold_allowed);
+ return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif
@@ -537,7 +537,7 @@ static ssize_t devspec_show(struct device *dev,
if (np == NULL)
return 0;
- return sprintf(buf, "%pOF", np);
+ return sysfs_emit(buf, "%pOF", np);
}
static DEVICE_ATTR_RO(devspec);
#endif
@@ -583,7 +583,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = scnprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ len = sysfs_emit(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
@@ -658,11 +658,11 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
struct pci_dev *vga_dev = vga_default_device();
if (vga_dev)
- return sprintf(buf, "%u\n", (pdev == vga_dev));
+ return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
- return sprintf(buf, "%u\n",
- !!(pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW));
+ return sysfs_emit(buf, "%u\n",
+ !!(pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);
@@ -808,6 +808,29 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
+static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+
+static struct bin_attribute *pci_dev_config_attrs[] = {
+ &bin_attr_config,
+ NULL,
+};
+
+static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ a->size = PCI_CFG_SPACE_SIZE;
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ a->size = PCI_CFG_SPACE_EXP_SIZE;
+
+ return a->attr.mode;
+}
+
+static const struct attribute_group pci_dev_config_attr_group = {
+ .bin_attrs = pci_dev_config_attrs,
+ .is_bin_visible = pci_dev_config_attr_is_visible,
+};
#ifdef HAVE_PCI_LEGACY
/**
@@ -1283,25 +1306,32 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
+static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static const struct bin_attribute pci_config_attr = {
- .attr = {
- .name = "config",
- .mode = 0644,
- },
- .size = PCI_CFG_SPACE_SIZE,
- .read = pci_read_config,
- .write = pci_write_config,
+static struct bin_attribute *pci_dev_rom_attrs[] = {
+ &bin_attr_rom,
+ NULL,
};
-static const struct bin_attribute pcie_config_attr = {
- .attr = {
- .name = "config",
- .mode = 0644,
- },
- .size = PCI_CFG_SPACE_EXP_SIZE,
- .read = pci_read_config,
- .write = pci_write_config,
+static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+ size_t rom_size;
+
+ /* If the device has a ROM, try to expose it in sysfs. */
+ rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ if (!rom_size)
+ return 0;
+
+ a->size = rom_size;
+
+ return a->attr.mode;
+}
+
+static const struct attribute_group pci_dev_rom_attr_group = {
+ .bin_attrs = pci_dev_rom_attrs,
+ .is_bin_visible = pci_dev_rom_attr_is_visible,
};
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
@@ -1325,102 +1355,35 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_WO(reset);
-static DEVICE_ATTR(reset, 0200, NULL, reset_store);
+static struct attribute *pci_dev_reset_attrs[] = {
+ &dev_attr_reset.attr,
+ NULL,
+};
-static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- int retval;
-
- pcie_vpd_create_sysfs_dev_files(dev);
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (dev->reset_fn) {
- retval = device_create_file(&dev->dev, &dev_attr_reset);
- if (retval)
- goto error;
- }
- return 0;
+ if (!pdev->reset_fn)
+ return 0;
-error:
- pcie_vpd_remove_sysfs_dev_files(dev);
- return retval;
+ return a->mode;
}
+static const struct attribute_group pci_dev_reset_attr_group = {
+ .attrs = pci_dev_reset_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
- int retval;
- int rom_size;
- struct bin_attribute *attr;
-
if (!sysfs_initialized)
return -EACCES;
- if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
- else
- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
- if (retval)
- goto err;
-
- retval = pci_create_resource_files(pdev);
- if (retval)
- goto err_config_file;
-
- /* If the device has a ROM, try to expose it in sysfs. */
- rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- if (rom_size) {
- attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
- if (!attr) {
- retval = -ENOMEM;
- goto err_resource_files;
- }
- sysfs_bin_attr_init(attr);
- attr->size = rom_size;
- attr->attr.name = "rom";
- attr->attr.mode = 0600;
- attr->read = pci_read_rom;
- attr->write = pci_write_rom;
- retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
- if (retval) {
- kfree(attr);
- goto err_resource_files;
- }
- pdev->rom_attr = attr;
- }
-
- /* add sysfs entries for various capabilities */
- retval = pci_create_capabilities_sysfs(pdev);
- if (retval)
- goto err_rom_file;
-
- pci_create_firmware_label_files(pdev);
-
- return 0;
-
-err_rom_file:
- if (pdev->rom_attr) {
- sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
- kfree(pdev->rom_attr);
- pdev->rom_attr = NULL;
- }
-err_resource_files:
- pci_remove_resource_files(pdev);
-err_config_file:
- if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
- else
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
-err:
- return retval;
-}
-
-static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
-{
- pcie_vpd_remove_sysfs_dev_files(dev);
- if (dev->reset_fn) {
- device_remove_file(&dev->dev, &dev_attr_reset);
- dev->reset_fn = 0;
- }
+ return pci_create_resource_files(pdev);
}
/**
@@ -1434,22 +1397,7 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return;
- pci_remove_capabilities_sysfs(pdev);
-
- if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
- else
- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
-
pci_remove_resource_files(pdev);
-
- if (pdev->rom_attr) {
- sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
- kfree(pdev->rom_attr);
- pdev->rom_attr = NULL;
- }
-
- pci_remove_firmware_label_files(pdev);
}
static int __init pci_sysfs_init(void)
@@ -1540,6 +1488,16 @@ static const struct attribute_group pci_dev_group = {
const struct attribute_group *pci_dev_groups[] = {
&pci_dev_group,
+ &pci_dev_config_attr_group,
+ &pci_dev_rom_attr_group,
+ &pci_dev_reset_attr_group,
+ &pci_dev_vpd_attr_group,
+#ifdef CONFIG_DMI
+ &pci_dev_smbios_attr_group,
+#endif
+#ifdef CONFIG_ACPI
+ &pci_dev_acpi_attr_group,
+#endif
NULL,
};
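
The sprintf()-to-sysfs_emit() sweep above is mechanical but meaningful: sysfs_emit() and sysfs_emit_at() enforce the one-PAGE_SIZE contract of show() callbacks instead of trusting each call site. A minimal show() using the multi-line variant, with the attribute name illustrative:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t values_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        size_t len = 0;
        int i;

        /* sysfs_emit_at() clamps output to PAGE_SIZE, unlike raw sprintf() */
        for (i = 0; i < 3; i++)
                len += sysfs_emit_at(buf, len, "value %d\n", i);

        return len;
}
static DEVICE_ATTR_RO(values);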
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f4c26e6118ea..b717680377a9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -693,6 +693,36 @@ u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
+ * pci_find_vsec_capability - Find a vendor-specific extended capability
+ * @dev: PCI device to query
+ * @vendor: Vendor ID for which capability is defined
+ * @cap: Vendor-specific capability ID
+ *
+ * If @dev has Vendor ID @vendor, search for a VSEC capability with
+ * VSEC ID @cap. If found, return the capability offset in
+ * config space; otherwise return 0.
+ */
+u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor != dev->vendor)
+ return 0;
+
+ while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+ if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+ &header) == PCIBIOS_SUCCESSFUL &&
+ PCI_VNDR_HEADER_ID(header) == cap)
+ return vsec;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
+
+/**
* pci_find_parent_resource - return resource region of parent bus of given
* region
* @dev: PCI device structure contains resources to be searched
@@ -4042,6 +4072,7 @@ phys_addr_t pci_pio_to_address(unsigned long pio)
return address;
}
+EXPORT_SYMBOL_GPL(pci_pio_to_address);
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
@@ -4444,6 +4475,23 @@ void pci_clear_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_clear_mwi);
/**
+ * pci_disable_parity - disable parity checking for device
+ * @dev: the PCI device to operate on
+ *
+ * Disable parity checking for device @dev
+ */
+void pci_disable_parity(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_PARITY) {
+ cmd &= ~PCI_COMMAND_PARITY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+}
+
+/**
* pci_intx - enables/disables PCI INTx for device dev
* @pdev: the PCI device to operate on
* @enable: boolean: whether to enable or disable PCI INTx
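
A typical caller of the newly exported pci_find_vsec_capability() looks like the sketch below; the VSEC ID is illustrative, not taken from any real device:

#include <linux/pci.h>

#define MY_VSEC_ID      0x10    /* illustrative vendor-specific capability ID */

static int my_locate_vsec(struct pci_dev *pdev)
{
        u16 pos;

        /* Only matches when pdev->vendor equals the vendor argument */
        pos = pci_find_vsec_capability(pdev, pdev->vendor, MY_VSEC_ID);
        if (!pos)
                return -ENODEV;

        /* pos is the config-space offset of the capability header */
        return pos;
}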
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index afb87b917f07..37c913bbc6e1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -21,16 +21,10 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
int pci_create_sysfs_dev_files(struct pci_dev *pdev);
void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
-static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
-{ return; }
-static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
-{ return; }
-#else
-void pci_create_firmware_label_files(struct pci_dev *pdev);
-void pci_remove_firmware_label_files(struct pci_dev *pdev);
-#endif
void pci_cleanup_rom(struct pci_dev *dev);
+#ifdef CONFIG_DMI
+extern const struct attribute_group pci_dev_smbios_attr_group;
+#endif
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
@@ -141,10 +135,9 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
type == PCI_EXP_TYPE_PCIE_BRIDGE;
}
-int pci_vpd_init(struct pci_dev *dev);
+void pci_vpd_init(struct pci_dev *dev);
void pci_vpd_release(struct pci_dev *dev);
-void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev);
-void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev);
+extern const struct attribute_group pci_dev_vpd_attr_group;
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
@@ -625,6 +618,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
+#else
+static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
+ u16 segment, struct resource *res)
+{
+ return -ENODEV;
+}
#endif
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
@@ -697,6 +696,7 @@ static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL
#ifdef CONFIG_ACPI
int pci_acpi_program_hp_params(struct pci_dev *dev);
+extern const struct attribute_group pci_dev_acpi_attr_group;
#else
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index ba22388342d1..ec943cee5ecc 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -129,7 +129,7 @@ static const char * const ecrc_policy_str[] = {
};
/**
- * enable_ercr_checking - enable PCIe ECRC checking for a device
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
@@ -153,7 +153,7 @@ static int enable_ecrc_checking(struct pci_dev *dev)
}
/**
- * disable_ercr_checking - disables PCIe ECRC checking for a device
+ * disable_ecrc_checking - disables PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
@@ -1442,7 +1442,7 @@ static struct pcie_port_service_driver aerdriver = {
};
/**
- * aer_service_init - register AER root service driver
+ * pcie_aer_init - register AER root service driver
*
* Invoked when AER root service driver is loaded.
*/
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 3fc08488d65f..1d0dd77fed3a 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -463,7 +463,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
};
/**
- * pcie_pme_service_init - Register the PCIe PME service driver.
+ * pcie_pme_init - Register the PCIe PME service driver.
*/
int __init pcie_pme_init(void)
{
diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
index 2c5c552994e4..d0bcd141ac9c 100644
--- a/drivers/pci/pcie/rcec.c
+++ b/drivers/pci/pcie/rcec.c
@@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
/* Same bus, so check bitmap */
for_each_set_bit(devn, &bitmap, 32)
- if (devn == rciep->devfn)
+ if (devn == PCI_SLOT(rciep->devfn))
return true;
return false;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 953f15abc850..3a62d09b8869 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -895,7 +895,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
/* Temporarily move resources off the list */
list_splice_init(&bridge->windows, &resources);
bus->sysdata = bridge->sysdata;
- bus->msi = bridge->msi;
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
@@ -926,6 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
pci_set_bus_msi_domain(bus);
+ if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+ bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
if (!parent)
set_dev_node(bus->bridge, pcibus_to_node(bus));
@@ -1053,7 +1054,6 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
return NULL;
child->parent = parent;
- child->msi = parent->msi;
child->sysdata = parent->sysdata;
child->bus_flags = parent->bus_flags;
@@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
pci_set_of_node(dev);
if (pci_setup_device(dev)) {
+ pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 653660e3ba9e..dcb229de1acb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -206,16 +206,11 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
/*
- * The Mellanox Tavor device gives false positive parity errors. Mark this
- * device with a broken_parity_status to allow PCI scanning code to "skip"
- * this now blacklisted device.
+ * The Mellanox Tavor device gives false positive parity errors. Disable
+ * parity error reporting.
*/
-static void quirk_mellanox_tavor(struct pci_dev *dev)
-{
- dev->broken_parity_status = 1; /* This device gives false positives */
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, pci_disable_parity);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, pci_disable_parity);
/*
* Deal with broken BIOSes that neglect to enable passive release,
@@ -2585,10 +2580,8 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
- if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
- pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
- dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
- }
+ if (!msi_ht_cap_enabled(dev))
+ quirk_disable_msi(dev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
@@ -2601,9 +2594,6 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
- if (!dev->subordinate)
- return;
-
/*
* Check HT MSI cap on this chipset and the root one. A single one
* having MSI is enough to be sure that MSI is supported.
@@ -2611,10 +2601,8 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
return;
- if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
- pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
- dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
- }
+ if (!msi_ht_cap_enabled(pdev))
+ quirk_msi_ht_cap(dev);
pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
@@ -3922,6 +3910,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
+ { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
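[Editor's note] The two HyperTransport MSI hunks above fold the open-coded warn-and-flag sequence into the existing quirk_disable_msi() helper elsewhere in quirks.c. A sketch of that sequence, reconstructed from the removed lines; the function name here is illustrative, assuming the helper keeps the old behaviour:

#include <linux/pci.h>

/* Disable MSI for everything below a bridge by flagging its
 * subordinate bus, the effect the removed open-coded quirks had. */
static void example_disable_subordinate_msi(struct pci_dev *bridge)
{
	if (!bridge->subordinate)
		return;		/* not a bridge; nothing to flag */

	pci_warn(bridge, "MSI quirk detected; subordinate MSI disabled\n");
	bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
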
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 95dec03d9f2a..dd12c2fcc7dc 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,6 +19,8 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
+ dev->reset_fn = 0;
+
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 7915d10f9aa1..26bf7c877de5 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -16,12 +16,10 @@
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
- int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
const struct pci_vpd_ops *ops;
- struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */
struct mutex lock;
unsigned int len;
u16 flag;
@@ -30,6 +28,11 @@ struct pci_vpd {
unsigned int valid:1;
};
+static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
+{
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+}
+
/**
* pci_read_vpd - Read one entry from Vital Product Data
* @dev: pci device struct
@@ -60,19 +63,6 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
}
EXPORT_SYMBOL(pci_write_vpd);
-/**
- * pci_set_vpd_size - Set size of Vital Product Data space
- * @dev: pci device struct
- * @len: size of vpd space
- */
-int pci_set_vpd_size(struct pci_dev *dev, size_t len)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->set_size(dev, len);
-}
-EXPORT_SYMBOL(pci_set_vpd_size);
-
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
/**
@@ -85,10 +75,14 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
size_t off = 0;
unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
- while (off < old_size &&
- pci_read_vpd(dev, off, 1, header) == 1) {
+ while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
unsigned char tag;
+ if (!header[0] && !off) {
+ pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
+ return 0;
+ }
+
if (header[0] & PCI_VPD_LRDT) {
/* Large Resource Data Type Tag */
tag = pci_vpd_lrdt_tag(header);
@@ -297,30 +291,15 @@ out:
return ret ? ret : count;
}
-static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_vpd *vpd = dev->vpd;
-
- if (len == 0 || len > PCI_VPD_MAX_SIZE)
- return -EIO;
-
- vpd->valid = 1;
- vpd->len = len;
-
- return 0;
-}
-
static const struct pci_vpd_ops pci_vpd_ops = {
.read = pci_vpd_read,
.write = pci_vpd_write,
- .set_size = pci_vpd_set_size,
};
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ struct pci_dev *tdev = pci_get_func0_dev(dev);
ssize_t ret;
if (!tdev)
@@ -334,8 +313,7 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ struct pci_dev *tdev = pci_get_func0_dev(dev);
ssize_t ret;
if (!tdev)
@@ -346,38 +324,23 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
return ret;
}
-static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- int ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_set_vpd_size(tdev, len);
- pci_dev_put(tdev);
- return ret;
-}
-
static const struct pci_vpd_ops pci_vpd_f0_ops = {
.read = pci_vpd_f0_read,
.write = pci_vpd_f0_write,
- .set_size = pci_vpd_f0_set_size,
};
-int pci_vpd_init(struct pci_dev *dev)
+void pci_vpd_init(struct pci_dev *dev)
{
struct pci_vpd *vpd;
u8 cap;
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
- return -ENODEV;
+ return;
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
- return -ENOMEM;
+ return;
vpd->len = PCI_VPD_MAX_SIZE;
if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
@@ -389,7 +352,6 @@ int pci_vpd_init(struct pci_dev *dev)
vpd->busy = 0;
vpd->valid = 0;
dev->vpd = vpd;
- return 0;
}
void pci_vpd_release(struct pci_dev *dev)
@@ -397,102 +359,56 @@ void pci_vpd_release(struct pci_dev *dev)
kfree(dev->vpd);
}
-static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
- if (bin_attr->size > 0) {
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
- }
-
return pci_read_vpd(dev, off, count, buf);
}
-static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
- if (bin_attr->size > 0) {
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
- }
-
return pci_write_vpd(dev, off, count, buf);
}
+static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev)
-{
- int retval;
- struct bin_attribute *attr;
-
- if (!dev->vpd)
- return;
+static struct bin_attribute *vpd_attrs[] = {
+ &bin_attr_vpd,
+ NULL,
+};
- attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
- if (!attr)
- return;
+static umode_t vpd_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- sysfs_bin_attr_init(attr);
- attr->size = 0;
- attr->attr.name = "vpd";
- attr->attr.mode = S_IRUSR | S_IWUSR;
- attr->read = read_vpd_attr;
- attr->write = write_vpd_attr;
- retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
- if (retval) {
- kfree(attr);
- return;
- }
+ if (!pdev->vpd)
+ return 0;
- dev->vpd->attr = attr;
+ return a->attr.mode;
}
-void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev)
-{
- if (dev->vpd && dev->vpd->attr) {
- sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
- kfree(dev->vpd->attr);
- }
-}
+const struct attribute_group pci_dev_vpd_attr_group = {
+ .bin_attrs = vpd_attrs,
+ .is_bin_visible = vpd_attr_is_visible,
+};
-int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
+int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
{
- int i;
+ int i = 0;
- for (i = off; i < len; ) {
- u8 val = buf[i];
-
- if (val & PCI_VPD_LRDT) {
- /* Don't return success of the tag isn't complete */
- if (i + PCI_VPD_LRDT_TAG_SIZE > len)
- break;
-
- if (val == rdt)
- return i;
-
- i += PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&buf[i]);
- } else {
- u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;
-
- if (tag == rdt)
- return i;
-
- if (tag == PCI_VPD_SRDT_END)
- break;
+ /* look for LRDT tags only, end tag is the only SRDT tag */
+ while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
+ if (buf[i] == rdt)
+ return i;
- i += PCI_VPD_SRDT_TAG_SIZE +
- pci_vpd_srdt_size(&buf[i]);
- }
+ i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
}
return -ENOENT;
@@ -530,7 +446,7 @@ static void quirk_f0_vpd_link(struct pci_dev *dev)
if (!PCI_FUNC(dev->devfn))
return;
- f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ f0 = pci_get_func0_dev(dev);
if (!f0)
return;
@@ -570,7 +486,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
/*
* The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
* device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
@@ -578,51 +493,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
-/*
- * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
- * VPD end tag will hang the device. This problem was initially
- * observed when a vpd entry was created in sysfs
- * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
- * will dump 32k of data. Reading a full 32k will cause an access
- * beyond the VPD end tag causing the device to hang. Once the device
- * is hung, the bnx2 driver will not be able to reset the device.
- * We believe that it is legal to read beyond the end tag and
- * therefore the solution is to limit the read/write length.
- */
-static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
+static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
- /*
- * Only disable the VPD capability for 5706, 5706S, 5708,
- * 5708S and 5709 rev. A
- */
- if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
- (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
- (dev->device == PCI_DEVICE_ID_NX2_5708) ||
- (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
- ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
- (dev->revision & 0xf0) == 0x0)) {
- if (dev->vpd)
- dev->vpd->len = 0x80;
- }
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
+ return;
+
+ vpd->valid = 1;
+ vpd->len = len;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5706S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5708S,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709,
- quirk_brcm_570x_limit_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
- PCI_DEVICE_ID_NX2_5709S,
- quirk_brcm_570x_limit_vpd);
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
@@ -642,9 +522,9 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
* limits.
*/
if (chip == 0x0 && prod >= 0x20)
- pci_set_vpd_size(dev, 8192);
+ pci_vpd_set_size(dev, 8192);
else if (chip >= 0x4 && func < 0x8)
- pci_set_vpd_size(dev, 2048);
+ pci_vpd_set_size(dev, 2048);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
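[Editor's note] The vpd.c conversion above replaces per-device sysfs_create_bin_file()/sysfs_remove_bin_file() calls with a static attribute group whose is_bin_visible() callback hides the file on devices without VPD. A self-contained sketch of that pattern, with illustrative example_* names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *attr, char *buf,
			    loff_t off, size_t count)
{
	return 0;	/* a real callback would copy device data into buf */
}
static BIN_ATTR_RO(example, 0);	/* size 0: no read-size clamping */

static struct bin_attribute *example_bin_attrs[] = {
	&bin_attr_example,
	NULL,
};

static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct bin_attribute *a, int n)
{
	/* Return 0 to hide the file on this device, or the attribute's
	 * default mode to expose it, as vpd_attr_is_visible() does. */
	return a->attr.mode;
}

const struct attribute_group example_attr_group = {
	.bin_attrs = example_bin_attrs,
	.is_bin_visible = example_attr_is_visible,
};

The group is listed once in the device type's attribute groups, and the core creates or skips the file per device; no explicit create/remove calls are needed on hotplug paths.
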
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 2d7502648219..b7a8f3a1921f 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
spin_unlock(&pcifront_dev_lock);
- if (!err && !swiotlb_nr_tbl()) {
+ if (!err && !is_swiotlb_active()) {
err = pci_xen_swiotlb_init_late();
if (err)
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d10d84fb79c..d4f7f1f9cc77 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -581,33 +581,6 @@ static const struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
- if (!__oprofile_cpu_pmu)
- return NULL;
-
- return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
- int max_events = 0;
-
- if (__oprofile_cpu_pmu != NULL)
- max_events = __oprofile_cpu_pmu->num_events;
-
- return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
static int armpmu_count_irq_users(const int irq)
{
int cpu, count = 0;
@@ -979,9 +952,6 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
goto out_destroy;
- if (!__oprofile_cpu_pmu)
- __oprofile_cpu_pmu = pmu;
-
pr_info("enabled with %s PMU driver, %d counters available%s\n",
pmu->name, pmu->num_events,
has_nmi ? ", using NMIs" : "");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b7675cce0027..c2c7e7963ed0 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -208,13 +208,18 @@ config PINCTRL_OXNAS
select MFD_SYSCON
config PINCTRL_ROCKCHIP
- bool
+ tristate "Rockchip gpio and pinctrl driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
+ select GPIOLIB
select PINMUX
select GENERIC_PINCONF
select GENERIC_IRQ_CHIP
select MFD_SYSCON
select OF_GPIO
+ default ARCH_ROCKCHIP
+ help
+ This selects the pinctrl and GPIO driver for Rockchip SoCs.
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
@@ -318,6 +323,20 @@ config PINCTRL_ZYNQ
help
This selects the pinctrl driver for Xilinx Zynq.
+config PINCTRL_ZYNQMP
+ tristate "Pinctrl driver for Xilinx ZynqMP"
+ depends on ZYNQMP_FIRMWARE
+ select PINMUX
+ select GENERIC_PINCONF
+ default ZYNQMP_FIRMWARE
+ help
+ This selects the pinctrl driver for the Xilinx ZynqMP platform.
+ This driver will query the pin information from the firmware
+ and allow configuring the pins.
+ Configuration can include the mux function to select for those
+ pins/groups, and various pin configuration parameters
+ such as pull-up, slew rate, etc.
+
config PINCTRL_INGENIC
bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
default MACH_INGENIC
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 8bf459c32a76..5ef5334a797f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 0ed14de0134c..c9c5efc92731 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -29,6 +29,68 @@ config PINCTRL_BCM2835
help
Say Y here to enable the Broadcom BCM2835 GPIO driver.
+config PINCTRL_BCM63XX
+ bool
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIO_REGMAP
+
+config PINCTRL_BCM6318
+ bool "Broadcom BCM6318 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6318 GPIO driver.
+
+config PINCTRL_BCM6328
+ bool "Broadcom BCM6328 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6328 GPIO driver.
+
+config PINCTRL_BCM6358
+ bool "Broadcom BCM6358 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6358 GPIO driver.
+
+config PINCTRL_BCM6362
+ bool "Broadcom BCM6362 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6362 GPIO driver.
+
+config PINCTRL_BCM6368
+ bool "Broadcom BCM6368 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM6368 GPIO driver.
+
+config PINCTRL_BCM63268
+ bool "Broadcom BCM63268 GPIO driver"
+ depends on (BMIPS_GENERIC || COMPILE_TEST)
+ depends on OF
+ select PINCTRL_BCM63XX
+ default BMIPS_GENERIC
+ help
+ Say Y here to enable the Broadcom BCM63268 GPIO driver.
+
config PINCTRL_IPROC_GPIO
bool "Broadcom iProc GPIO (with PINCONF) driver"
depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST)
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 79d5e49fdd9a..00c7b7775e63 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -3,6 +3,13 @@
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
+obj-$(CONFIG_PINCTRL_BCM63XX) += pinctrl-bcm63xx.o
+obj-$(CONFIG_PINCTRL_BCM6318) += pinctrl-bcm6318.o
+obj-$(CONFIG_PINCTRL_BCM6328) += pinctrl-bcm6328.o
+obj-$(CONFIG_PINCTRL_BCM6358) += pinctrl-bcm6358.o
+obj-$(CONFIG_PINCTRL_BCM6362) += pinctrl-bcm6362.o
+obj-$(CONFIG_PINCTRL_BCM6368) += pinctrl-bcm6368.o
+obj-$(CONFIG_PINCTRL_BCM63268) += pinctrl-bcm63268.o
obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
obj-$(CONFIG_PINCTRL_NS) += pinctrl-ns.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6318.c b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
new file mode 100644
index 000000000000..77fd9b58067d
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6318.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6318 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6318_NUM_GPIOS 50
+#define BCM6318_NUM_MUX 48
+
+#define BCM6318_MODE_REG 0x18
+#define BCM6318_MUX_REG 0x1c
+#define BCM6328_MUX_MASK GENMASK(1, 0)
+#define BCM6318_PAD_REG 0x54
+#define BCM6328_PAD_MASK GENMASK(3, 0)
+
+struct bcm6318_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6318_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ unsigned mode_val:1;
+ unsigned mux_val:2;
+};
+
+static const struct pinctrl_pin_desc bcm6318_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+ PINCTRL_PIN(48, "gpio48"),
+ PINCTRL_PIN(49, "gpio49"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned gpio32_pins[] = { 32 };
+static unsigned gpio33_pins[] = { 33 };
+static unsigned gpio34_pins[] = { 34 };
+static unsigned gpio35_pins[] = { 35 };
+static unsigned gpio36_pins[] = { 36 };
+static unsigned gpio37_pins[] = { 37 };
+static unsigned gpio38_pins[] = { 38 };
+static unsigned gpio39_pins[] = { 39 };
+static unsigned gpio40_pins[] = { 40 };
+static unsigned gpio41_pins[] = { 41 };
+static unsigned gpio42_pins[] = { 42 };
+static unsigned gpio43_pins[] = { 43 };
+static unsigned gpio44_pins[] = { 44 };
+static unsigned gpio45_pins[] = { 45 };
+static unsigned gpio46_pins[] = { 46 };
+static unsigned gpio47_pins[] = { 47 };
+static unsigned gpio48_pins[] = { 48 };
+static unsigned gpio49_pins[] = { 49 };
+
+#define BCM6318_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6318_pingroup bcm6318_groups[] = {
+ BCM6318_GROUP(gpio0),
+ BCM6318_GROUP(gpio1),
+ BCM6318_GROUP(gpio2),
+ BCM6318_GROUP(gpio3),
+ BCM6318_GROUP(gpio4),
+ BCM6318_GROUP(gpio5),
+ BCM6318_GROUP(gpio6),
+ BCM6318_GROUP(gpio7),
+ BCM6318_GROUP(gpio8),
+ BCM6318_GROUP(gpio9),
+ BCM6318_GROUP(gpio10),
+ BCM6318_GROUP(gpio11),
+ BCM6318_GROUP(gpio12),
+ BCM6318_GROUP(gpio13),
+ BCM6318_GROUP(gpio14),
+ BCM6318_GROUP(gpio15),
+ BCM6318_GROUP(gpio16),
+ BCM6318_GROUP(gpio17),
+ BCM6318_GROUP(gpio18),
+ BCM6318_GROUP(gpio19),
+ BCM6318_GROUP(gpio20),
+ BCM6318_GROUP(gpio21),
+ BCM6318_GROUP(gpio22),
+ BCM6318_GROUP(gpio23),
+ BCM6318_GROUP(gpio24),
+ BCM6318_GROUP(gpio25),
+ BCM6318_GROUP(gpio26),
+ BCM6318_GROUP(gpio27),
+ BCM6318_GROUP(gpio28),
+ BCM6318_GROUP(gpio29),
+ BCM6318_GROUP(gpio30),
+ BCM6318_GROUP(gpio31),
+ BCM6318_GROUP(gpio32),
+ BCM6318_GROUP(gpio33),
+ BCM6318_GROUP(gpio34),
+ BCM6318_GROUP(gpio35),
+ BCM6318_GROUP(gpio36),
+ BCM6318_GROUP(gpio37),
+ BCM6318_GROUP(gpio38),
+ BCM6318_GROUP(gpio39),
+ BCM6318_GROUP(gpio40),
+ BCM6318_GROUP(gpio41),
+ BCM6318_GROUP(gpio42),
+ BCM6318_GROUP(gpio43),
+ BCM6318_GROUP(gpio44),
+ BCM6318_GROUP(gpio45),
+ BCM6318_GROUP(gpio46),
+ BCM6318_GROUP(gpio47),
+ BCM6318_GROUP(gpio48),
+ BCM6318_GROUP(gpio49),
+};
+
+/* GPIO_MODE */
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+/* PINMUX_SEL */
+static const char * const ephy0_spd_led_groups[] = {
+ "gpio0",
+};
+
+static const char * const ephy1_spd_led_groups[] = {
+ "gpio1",
+};
+
+static const char * const ephy2_spd_led_groups[] = {
+ "gpio2",
+};
+
+static const char * const ephy3_spd_led_groups[] = {
+ "gpio3",
+};
+
+static const char * const ephy0_act_led_groups[] = {
+ "gpio4",
+};
+
+static const char * const ephy1_act_led_groups[] = {
+ "gpio5",
+};
+
+static const char * const ephy2_act_led_groups[] = {
+ "gpio6",
+};
+
+static const char * const ephy3_act_led_groups[] = {
+ "gpio7",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio6",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio7",
+};
+
+static const char * const inet_act_led_groups[] = {
+ "gpio8",
+};
+
+static const char * const inet_fail_led_groups[] = {
+ "gpio9",
+};
+
+static const char * const dsl_led_groups[] = {
+ "gpio10",
+};
+
+static const char * const post_fail_led_groups[] = {
+ "gpio11",
+};
+
+static const char * const wlan_wps_led_groups[] = {
+ "gpio12",
+};
+
+static const char * const usb_pwron_groups[] = {
+ "gpio13",
+};
+
+static const char * const usb_device_led_groups[] = {
+ "gpio13",
+};
+
+static const char * const usb_active_groups[] = {
+ "gpio40",
+};
+
+#define BCM6318_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mode_val = 1, \
+ }
+
+#define BCM6318_MUX_FUN(n, mux) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mux_val = mux, \
+ }
+
+static const struct bcm6318_function bcm6318_funcs[] = {
+ BCM6318_MODE_FUN(led),
+ BCM6318_MUX_FUN(ephy0_spd_led, 1),
+ BCM6318_MUX_FUN(ephy1_spd_led, 1),
+ BCM6318_MUX_FUN(ephy2_spd_led, 1),
+ BCM6318_MUX_FUN(ephy3_spd_led, 1),
+ BCM6318_MUX_FUN(ephy0_act_led, 1),
+ BCM6318_MUX_FUN(ephy1_act_led, 1),
+ BCM6318_MUX_FUN(ephy2_act_led, 1),
+ BCM6318_MUX_FUN(ephy3_act_led, 1),
+ BCM6318_MUX_FUN(serial_led_data, 3),
+ BCM6318_MUX_FUN(serial_led_clk, 3),
+ BCM6318_MUX_FUN(inet_act_led, 1),
+ BCM6318_MUX_FUN(inet_fail_led, 1),
+ BCM6318_MUX_FUN(dsl_led, 1),
+ BCM6318_MUX_FUN(post_fail_led, 1),
+ BCM6318_MUX_FUN(wlan_wps_led, 1),
+ BCM6318_MUX_FUN(usb_pwron, 1),
+ BCM6318_MUX_FUN(usb_device_led, 2),
+ BCM6318_MUX_FUN(usb_active, 2),
+};
+
+static inline unsigned int bcm6318_mux_off(unsigned int pin)
+{
+ return BCM6318_MUX_REG + (pin / 16) * 4;
+}
+
+static inline unsigned int bcm6318_pad_off(unsigned int pin)
+{
+ return BCM6318_PAD_REG + (pin / 8) * 4;
+}
+
+static int bcm6318_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6318_groups);
+}
+
+static const char *bcm6318_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6318_groups[group].name;
+}
+
+static int bcm6318_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6318_groups[group].pins;
+ *num_pins = bcm6318_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6318_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6318_funcs);
+}
+
+static const char *bcm6318_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6318_funcs[selector].name;
+}
+
+static int bcm6318_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6318_funcs[selector].groups;
+ *num_groups = bcm6318_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static inline void bcm6318_rmw_mux(struct bcm63xx_pinctrl *pc, unsigned pin,
+ unsigned int mode, unsigned int mux)
+{
+ if (pin < BCM63XX_BANK_GPIOS)
+ regmap_update_bits(pc->regs, BCM6318_MODE_REG, BIT(pin),
+ mode ? BIT(pin) : 0);
+
+ if (pin < BCM6318_NUM_MUX)
+ regmap_update_bits(pc->regs,
+ bcm6318_mux_off(pin),
+ BCM6328_MUX_MASK << ((pin % 16) * 2),
+ mux << ((pin % 16) * 2));
+}
+
+static inline void bcm6318_set_pad(struct bcm63xx_pinctrl *pc, unsigned pin,
+ uint8_t val)
+{
+ regmap_update_bits(pc->regs, bcm6318_pad_off(pin),
+ BCM6328_PAD_MASK << ((pin % 8) * 4),
+ val << ((pin % 8) * 4));
+}
+
+static int bcm6318_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm6318_pingroup *pg = &bcm6318_groups[group];
+ const struct bcm6318_function *f = &bcm6318_funcs[selector];
+
+ bcm6318_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
+
+ return 0;
+}
+
+static int bcm6318_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ if (offset < 13) {
+ /* GPIOs 0-12 use mux 0 as GPIO function */
+ bcm6318_rmw_mux(pc, offset, 0, 0);
+ } else if (offset < 42) {
+ /* GPIOs 13-41 use mux 3 as GPIO function */
+ bcm6318_rmw_mux(pc, offset, 0, 3);
+
+ bcm6318_set_pad(pc, offset, 0);
+ }
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6318_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6318_pinctrl_get_group_name,
+ .get_group_pins = bcm6318_pinctrl_get_group_pins,
+ .get_groups_count = bcm6318_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6318_pmx_ops = {
+ .get_function_groups = bcm6318_pinctrl_get_groups,
+ .get_function_name = bcm6318_pinctrl_get_func_name,
+ .get_functions_count = bcm6318_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6318_gpio_request_enable,
+ .set_mux = bcm6318_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6318_soc = {
+ .ngpios = BCM6318_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6318_pins),
+ .pctl_ops = &bcm6318_pctl_ops,
+ .pins = bcm6318_pins,
+ .pmx_ops = &bcm6318_pmx_ops,
+};
+
+static int bcm6318_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm6318_soc, NULL);
+}
+
+static const struct of_device_id bcm6318_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6318-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6318_pinctrl_driver = {
+ .probe = bcm6318_pinctrl_probe,
+ .driver = {
+ .name = "bcm6318-pinctrl",
+ .of_match_table = bcm6318_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6318_pinctrl_driver);
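[Editor's note] For reference, the offset helpers in this driver pack 16 two-bit mux fields into each 32-bit register starting at 0x1c, and 8 four-bit pad fields into each 32-bit register starting at 0x54. A standalone user-space sketch checking that arithmetic:

#include <assert.h>

#define BCM6318_MUX_REG 0x1c
#define BCM6318_PAD_REG 0x54

static unsigned int mux_off(unsigned int pin)
{
	return BCM6318_MUX_REG + (pin / 16) * 4;	/* 16 pins per word */
}

static unsigned int pad_off(unsigned int pin)
{
	return BCM6318_PAD_REG + (pin / 8) * 4;		/* 8 pins per word */
}

int main(void)
{
	assert(mux_off(0)  == 0x1c);	/* pins 0-15  -> first mux word  */
	assert(mux_off(17) == 0x20);	/* pins 16-31 -> second mux word */
	assert(pad_off(9)  == 0x58);	/* pins 8-15  -> second pad word */
	/* Within a word the field shift is (pin % 16) * 2 for mux and
	 * (pin % 8) * 4 for pad, as used by bcm6318_rmw_mux() above. */
	return 0;
}
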
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63268.c b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
new file mode 100644
index 000000000000..d4c5fad7fb7d
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63268.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM63268 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM63268_NUM_GPIOS 52
+#define BCM63268_NUM_LEDS 24
+
+#define BCM63268_LED_REG 0x10
+#define BCM63268_MODE_REG 0x18
+#define BCM63268_CTRL_REG 0x1c
+#define BCM63268_BASEMODE_REG 0x38
+#define BCM63268_BASEMODE_NAND BIT(2) /* GPIOs 2-7, 24-31 */
+#define BCM63268_BASEMODE_GPIO35 BIT(4) /* GPIO 35 */
+#define BCM63268_BASEMODE_DECTPD BIT(5) /* GPIOs 8/9 */
+#define BCM63268_BASEMODE_VDSL_PHY_0 BIT(6) /* GPIOs 10/11 */
+#define BCM63268_BASEMODE_VDSL_PHY_1 BIT(7) /* GPIOs 12/13 */
+#define BCM63268_BASEMODE_VDSL_PHY_2 BIT(8) /* GPIOs 24/25 */
+#define BCM63268_BASEMODE_VDSL_PHY_3 BIT(9) /* GPIOs 26/27 */
+
+enum bcm63268_pinctrl_reg {
+ BCM63268_LEDCTRL,
+ BCM63268_MODE,
+ BCM63268_CTRL,
+ BCM63268_BASEMODE,
+};
+
+struct bcm63268_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm63268_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ enum bcm63268_pinctrl_reg reg;
+ uint32_t mask;
+};
+
+#define BCM63268_PIN(a, b, basemode) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)(basemode) \
+ }
+
+static const struct pinctrl_pin_desc bcm63268_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ BCM63268_PIN(2, "gpio2", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(3, "gpio3", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(4, "gpio4", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(5, "gpio5", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(6, "gpio6", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(7, "gpio7", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(8, "gpio8", BCM63268_BASEMODE_DECTPD),
+ BCM63268_PIN(9, "gpio9", BCM63268_BASEMODE_DECTPD),
+ BCM63268_PIN(10, "gpio10", BCM63268_BASEMODE_VDSL_PHY_0),
+ BCM63268_PIN(11, "gpio11", BCM63268_BASEMODE_VDSL_PHY_0),
+ BCM63268_PIN(12, "gpio12", BCM63268_BASEMODE_VDSL_PHY_1),
+ BCM63268_PIN(13, "gpio13", BCM63268_BASEMODE_VDSL_PHY_1),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ BCM63268_PIN(24, "gpio24",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_2),
+ BCM63268_PIN(25, "gpio25",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_2),
+ BCM63268_PIN(26, "gpio26",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_3),
+ BCM63268_PIN(27, "gpio27",
+ BCM63268_BASEMODE_NAND | BCM63268_BASEMODE_VDSL_PHY_3),
+ BCM63268_PIN(28, "gpio28", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(29, "gpio29", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(30, "gpio30", BCM63268_BASEMODE_NAND),
+ BCM63268_PIN(31, "gpio31", BCM63268_BASEMODE_NAND),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+ PINCTRL_PIN(48, "gpio48"),
+ PINCTRL_PIN(49, "gpio49"),
+ PINCTRL_PIN(50, "gpio50"),
+ PINCTRL_PIN(51, "gpio51"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned gpio32_pins[] = { 32 };
+static unsigned gpio33_pins[] = { 33 };
+static unsigned gpio34_pins[] = { 34 };
+static unsigned gpio35_pins[] = { 35 };
+static unsigned gpio36_pins[] = { 36 };
+static unsigned gpio37_pins[] = { 37 };
+static unsigned gpio38_pins[] = { 38 };
+static unsigned gpio39_pins[] = { 39 };
+static unsigned gpio40_pins[] = { 40 };
+static unsigned gpio41_pins[] = { 41 };
+static unsigned gpio42_pins[] = { 42 };
+static unsigned gpio43_pins[] = { 43 };
+static unsigned gpio44_pins[] = { 44 };
+static unsigned gpio45_pins[] = { 45 };
+static unsigned gpio46_pins[] = { 46 };
+static unsigned gpio47_pins[] = { 47 };
+static unsigned gpio48_pins[] = { 48 };
+static unsigned gpio49_pins[] = { 49 };
+static unsigned gpio50_pins[] = { 50 };
+static unsigned gpio51_pins[] = { 51 };
+
+static unsigned nand_grp_pins[] = {
+ 2, 3, 4, 5, 6, 7, 24,
+ 25, 26, 27, 28, 29, 30, 31,
+};
+
+static unsigned dectpd_grp_pins[] = { 8, 9 };
+static unsigned vdsl_phy0_grp_pins[] = { 10, 11 };
+static unsigned vdsl_phy1_grp_pins[] = { 12, 13 };
+static unsigned vdsl_phy2_grp_pins[] = { 24, 25 };
+static unsigned vdsl_phy3_grp_pins[] = { 26, 27 };
+
+#define BCM63268_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm63268_pingroup bcm63268_groups[] = {
+ BCM63268_GROUP(gpio0),
+ BCM63268_GROUP(gpio1),
+ BCM63268_GROUP(gpio2),
+ BCM63268_GROUP(gpio3),
+ BCM63268_GROUP(gpio4),
+ BCM63268_GROUP(gpio5),
+ BCM63268_GROUP(gpio6),
+ BCM63268_GROUP(gpio7),
+ BCM63268_GROUP(gpio8),
+ BCM63268_GROUP(gpio9),
+ BCM63268_GROUP(gpio10),
+ BCM63268_GROUP(gpio11),
+ BCM63268_GROUP(gpio12),
+ BCM63268_GROUP(gpio13),
+ BCM63268_GROUP(gpio14),
+ BCM63268_GROUP(gpio15),
+ BCM63268_GROUP(gpio16),
+ BCM63268_GROUP(gpio17),
+ BCM63268_GROUP(gpio18),
+ BCM63268_GROUP(gpio19),
+ BCM63268_GROUP(gpio20),
+ BCM63268_GROUP(gpio21),
+ BCM63268_GROUP(gpio22),
+ BCM63268_GROUP(gpio23),
+ BCM63268_GROUP(gpio24),
+ BCM63268_GROUP(gpio25),
+ BCM63268_GROUP(gpio26),
+ BCM63268_GROUP(gpio27),
+ BCM63268_GROUP(gpio28),
+ BCM63268_GROUP(gpio29),
+ BCM63268_GROUP(gpio30),
+ BCM63268_GROUP(gpio31),
+ BCM63268_GROUP(gpio32),
+ BCM63268_GROUP(gpio33),
+ BCM63268_GROUP(gpio34),
+ BCM63268_GROUP(gpio35),
+ BCM63268_GROUP(gpio36),
+ BCM63268_GROUP(gpio37),
+ BCM63268_GROUP(gpio38),
+ BCM63268_GROUP(gpio39),
+ BCM63268_GROUP(gpio40),
+ BCM63268_GROUP(gpio41),
+ BCM63268_GROUP(gpio42),
+ BCM63268_GROUP(gpio43),
+ BCM63268_GROUP(gpio44),
+ BCM63268_GROUP(gpio45),
+ BCM63268_GROUP(gpio46),
+ BCM63268_GROUP(gpio47),
+ BCM63268_GROUP(gpio48),
+ BCM63268_GROUP(gpio49),
+ BCM63268_GROUP(gpio50),
+ BCM63268_GROUP(gpio51),
+
+ /* multi pin groups */
+ BCM63268_GROUP(nand_grp),
+ BCM63268_GROUP(dectpd_grp),
+ BCM63268_GROUP(vdsl_phy0_grp),
+ BCM63268_GROUP(vdsl_phy1_grp),
+ BCM63268_GROUP(vdsl_phy2_grp),
+ BCM63268_GROUP(vdsl_phy3_grp),
+};
+
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio0",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio1",
+};
+
+static const char * const hsspi_cs4_groups[] = {
+ "gpio16",
+};
+
+static const char * const hsspi_cs5_groups[] = {
+ "gpio17",
+};
+
+static const char * const hsspi_cs6_groups[] = {
+ "gpio8",
+};
+
+static const char * const hsspi_cs7_groups[] = {
+ "gpio9",
+};
+
+static const char * const uart1_scts_groups[] = {
+ "gpio10",
+ "gpio24",
+};
+
+static const char * const uart1_srts_groups[] = {
+ "gpio11",
+ "gpio25",
+};
+
+static const char * const uart1_sdin_groups[] = {
+ "gpio12",
+ "gpio26",
+};
+
+static const char * const uart1_sdout_groups[] = {
+ "gpio13",
+ "gpio27",
+};
+
+static const char * const ntr_pulse_in_groups[] = {
+ "gpio14",
+ "gpio28",
+};
+
+static const char * const dsl_ntr_pulse_out_groups[] = {
+ "gpio15",
+ "gpio29",
+};
+
+static const char * const adsl_spi_miso_groups[] = {
+ "gpio18",
+};
+
+static const char * const adsl_spi_mosi_groups[] = {
+ "gpio19",
+};
+
+static const char * const vreg_clk_groups[] = {
+ "gpio22",
+};
+
+static const char * const pcie_clkreq_b_groups[] = {
+ "gpio23",
+};
+
+static const char * const switch_led_clk_groups[] = {
+ "gpio30",
+};
+
+static const char * const switch_led_data_groups[] = {
+ "gpio31",
+};
+
+static const char * const wifi_groups[] = {
+ "gpio32",
+ "gpio33",
+ "gpio34",
+ "gpio35",
+ "gpio36",
+ "gpio37",
+ "gpio38",
+ "gpio39",
+ "gpio40",
+ "gpio41",
+ "gpio42",
+ "gpio43",
+ "gpio44",
+ "gpio45",
+ "gpio46",
+ "gpio47",
+ "gpio48",
+ "gpio49",
+ "gpio50",
+ "gpio51",
+};
+
+static const char * const nand_groups[] = {
+ "nand_grp",
+};
+
+static const char * const dectpd_groups[] = {
+ "dectpd_grp",
+};
+
+static const char * const vdsl_phy_override_0_groups[] = {
+ "vdsl_phy_override_0_grp",
+};
+
+static const char * const vdsl_phy_override_1_groups[] = {
+ "vdsl_phy_override_1_grp",
+};
+
+static const char * const vdsl_phy_override_2_groups[] = {
+ "vdsl_phy_override_2_grp",
+};
+
+static const char * const vdsl_phy_override_3_groups[] = {
+ "vdsl_phy_override_3_grp",
+};
+
+#define BCM63268_LED_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_LEDCTRL, \
+ }
+
+#define BCM63268_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_MODE, \
+ }
+
+#define BCM63268_CTRL_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_CTRL, \
+ }
+
+#define BCM63268_BASEMODE_FUN(n, val) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM63268_BASEMODE, \
+ .mask = val, \
+ }
+
+static const struct bcm63268_function bcm63268_funcs[] = {
+ BCM63268_LED_FUN(led),
+ BCM63268_MODE_FUN(serial_led_clk),
+ BCM63268_MODE_FUN(serial_led_data),
+ BCM63268_MODE_FUN(hsspi_cs6),
+ BCM63268_MODE_FUN(hsspi_cs7),
+ BCM63268_MODE_FUN(uart1_scts),
+ BCM63268_MODE_FUN(uart1_srts),
+ BCM63268_MODE_FUN(uart1_sdin),
+ BCM63268_MODE_FUN(uart1_sdout),
+ BCM63268_MODE_FUN(ntr_pulse_in),
+ BCM63268_MODE_FUN(dsl_ntr_pulse_out),
+ BCM63268_MODE_FUN(hsspi_cs4),
+ BCM63268_MODE_FUN(hsspi_cs5),
+ BCM63268_MODE_FUN(adsl_spi_miso),
+ BCM63268_MODE_FUN(adsl_spi_mosi),
+ BCM63268_MODE_FUN(vreg_clk),
+ BCM63268_MODE_FUN(pcie_clkreq_b),
+ BCM63268_MODE_FUN(switch_led_clk),
+ BCM63268_MODE_FUN(switch_led_data),
+ BCM63268_CTRL_FUN(wifi),
+ BCM63268_BASEMODE_FUN(nand, BCM63268_BASEMODE_NAND),
+ BCM63268_BASEMODE_FUN(dectpd, BCM63268_BASEMODE_DECTPD),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_0,
+ BCM63268_BASEMODE_VDSL_PHY_0),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_1,
+ BCM63268_BASEMODE_VDSL_PHY_1),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_2,
+ BCM63268_BASEMODE_VDSL_PHY_2),
+ BCM63268_BASEMODE_FUN(vdsl_phy_override_3,
+ BCM63268_BASEMODE_VDSL_PHY_3),
+};
+
+static int bcm63268_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm63268_groups);
+}
+
+static const char *bcm63268_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm63268_groups[group].name;
+}
+
+static int bcm63268_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm63268_groups[group].pins;
+ *num_pins = bcm63268_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm63268_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm63268_funcs);
+}
+
+static const char *bcm63268_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm63268_funcs[selector].name;
+}
+
+static int bcm63268_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm63268_funcs[selector].groups;
+ *num_groups = bcm63268_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static void bcm63268_set_gpio(struct bcm63xx_pinctrl *pc, unsigned pin)
+{
+ const struct pinctrl_pin_desc *desc = &bcm63268_pins[pin];
+ unsigned int basemode = (unsigned long) desc->drv_data;
+ unsigned int mask = BIT(bcm63xx_bank_pin(pin));
+
+ if (basemode)
+ regmap_update_bits(pc->regs, BCM63268_BASEMODE_REG, basemode,
+ 0);
+
+ if (pin < BCM63XX_BANK_GPIOS) {
+ /* base mode: 0 => gpio, 1 => mux function */
+ regmap_update_bits(pc->regs, BCM63268_MODE_REG, mask, 0);
+
+ /* pins 0-23 might be muxed to led */
+ if (pin < BCM63268_NUM_LEDS)
+ regmap_update_bits(pc->regs, BCM63268_LED_REG, mask,
+ 0);
+ } else if (pin < BCM63268_NUM_GPIOS) {
+ /* ctrl reg: 0 => wifi function, 1 => gpio */
+ regmap_update_bits(pc->regs, BCM63268_CTRL_REG, mask, mask);
+ }
+}
+
+static int bcm63268_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm63268_pingroup *pg = &bcm63268_groups[group];
+ const struct bcm63268_function *f = &bcm63268_funcs[selector];
+ unsigned i;
+ unsigned int reg;
+ unsigned int val, mask;
+
+ for (i = 0; i < pg->num_pins; i++)
+ bcm63268_set_gpio(pc, pg->pins[i]);
+
+ switch (f->reg) {
+ case BCM63268_LEDCTRL:
+ reg = BCM63268_LED_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM63268_MODE:
+ reg = BCM63268_MODE_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM63268_CTRL:
+ reg = BCM63268_CTRL_REG;
+ mask = BIT(pg->pins[0]);
+ val = 0;
+ break;
+ case BCM63268_BASEMODE:
+ reg = BCM63268_BASEMODE_REG;
+ mask = f->mask;
+ val = f->mask;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pc->regs, reg, mask, val);
+
+ return 0;
+}
+
+static int bcm63268_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ bcm63268_set_gpio(pc, offset);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm63268_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm63268_pinctrl_get_group_name,
+ .get_group_pins = bcm63268_pinctrl_get_group_pins,
+ .get_groups_count = bcm63268_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm63268_pmx_ops = {
+ .get_function_groups = bcm63268_pinctrl_get_groups,
+ .get_function_name = bcm63268_pinctrl_get_func_name,
+ .get_functions_count = bcm63268_pinctrl_get_func_count,
+ .gpio_request_enable = bcm63268_gpio_request_enable,
+ .set_mux = bcm63268_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm63268_soc = {
+ .ngpios = BCM63268_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm63268_pins),
+ .pctl_ops = &bcm63268_pctl_ops,
+ .pins = bcm63268_pins,
+ .pmx_ops = &bcm63268_pmx_ops,
+};
+
+static int bcm63268_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm63268_soc, NULL);
+}
+
+static const struct of_device_id bcm63268_pinctrl_match[] = {
+ { .compatible = "brcm,bcm63268-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm63268_pinctrl_driver = {
+ .probe = bcm63268_pinctrl_probe,
+ .driver = {
+ .name = "bcm63268-pinctrl",
+ .of_match_table = bcm63268_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm63268_pinctrl_driver);
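[Editor's note] bcm63268_set_gpio() above splits the 52 GPIOs across two register banks: pins below 32 clear their MODE bit (and LED bit, if below 24) to become GPIOs, while pins 32-51 set their CTRL bit (0 = wifi function, 1 = gpio). A user-space sketch of the bank bit math, assuming bcm63xx_bank_pin() reduces a pin to its position within a 32-GPIO bank (pin % 32):

#include <assert.h>

#define BANK_GPIOS 32u

static unsigned int bank_bit(unsigned int pin)
{
	return 1u << (pin % BANK_GPIOS);	/* assumed bcm63xx_bank_pin() semantics */
}

int main(void)
{
	assert(bank_bit(5)  == 0x20);	/* GPIO 5:  MODE/LED bit 5 */
	assert(bank_bit(35) == 0x08);	/* GPIO 35: CTRL bit 3     */
	return 0;
}
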
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6328.c b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
new file mode 100644
index 000000000000..c9efce600550
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6328.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6328 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6328_NUM_GPIOS 32
+
+#define BCM6328_MODE_REG 0x18
+#define BCM6328_MUX_HI_REG 0x1c
+#define BCM6328_MUX_LO_REG 0x20
+#define BCM6328_MUX_OTHER_REG 0x24
+#define BCM6328_MUX_MASK GENMASK(1, 0)
+
+struct bcm6328_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6328_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ unsigned mode_val:1;
+ unsigned mux_val:2;
+};
+
+static const unsigned int bcm6328_mux[] = {
+ BCM6328_MUX_LO_REG,
+ BCM6328_MUX_HI_REG,
+ BCM6328_MUX_OTHER_REG
+};
+
+static const struct pinctrl_pin_desc bcm6328_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+
+ /*
+ * No idea where they really are, so let's put them according
+ * to their mux offsets.
+ */
+ PINCTRL_PIN(36, "hsspi_cs1"),
+ PINCTRL_PIN(38, "usb_p2"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+
+static unsigned hsspi_cs1_pins[] = { 36 };
+static unsigned usb_port1_pins[] = { 38 };
+
+#define BCM6328_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6328_pingroup bcm6328_groups[] = {
+ BCM6328_GROUP(gpio0),
+ BCM6328_GROUP(gpio1),
+ BCM6328_GROUP(gpio2),
+ BCM6328_GROUP(gpio3),
+ BCM6328_GROUP(gpio4),
+ BCM6328_GROUP(gpio5),
+ BCM6328_GROUP(gpio6),
+ BCM6328_GROUP(gpio7),
+ BCM6328_GROUP(gpio8),
+ BCM6328_GROUP(gpio9),
+ BCM6328_GROUP(gpio10),
+ BCM6328_GROUP(gpio11),
+ BCM6328_GROUP(gpio12),
+ BCM6328_GROUP(gpio13),
+ BCM6328_GROUP(gpio14),
+ BCM6328_GROUP(gpio15),
+ BCM6328_GROUP(gpio16),
+ BCM6328_GROUP(gpio17),
+ BCM6328_GROUP(gpio18),
+ BCM6328_GROUP(gpio19),
+ BCM6328_GROUP(gpio20),
+ BCM6328_GROUP(gpio21),
+ BCM6328_GROUP(gpio22),
+ BCM6328_GROUP(gpio23),
+ BCM6328_GROUP(gpio24),
+ BCM6328_GROUP(gpio25),
+ BCM6328_GROUP(gpio26),
+ BCM6328_GROUP(gpio27),
+ BCM6328_GROUP(gpio28),
+ BCM6328_GROUP(gpio29),
+ BCM6328_GROUP(gpio30),
+ BCM6328_GROUP(gpio31),
+
+ BCM6328_GROUP(hsspi_cs1),
+ BCM6328_GROUP(usb_port1),
+};
+
+/* GPIO_MODE */
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+/* PINMUX_SEL */
+static const char * const serial_led_data_groups[] = {
+ "gpio6",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio7",
+};
+
+static const char * const inet_act_led_groups[] = {
+ "gpio11",
+};
+
+static const char * const pcie_clkreq_groups[] = {
+ "gpio16",
+};
+
+static const char * const ephy0_act_led_groups[] = {
+ "gpio25",
+};
+
+static const char * const ephy1_act_led_groups[] = {
+ "gpio26",
+};
+
+static const char * const ephy2_act_led_groups[] = {
+ "gpio27",
+};
+
+static const char * const ephy3_act_led_groups[] = {
+ "gpio28",
+};
+
+static const char * const hsspi_cs1_groups[] = {
+ "hsspi_cs1"
+};
+
+static const char * const usb_host_port_groups[] = {
+ "usb_port1",
+};
+
+static const char * const usb_device_port_groups[] = {
+ "usb_port1",
+};
+
+#define BCM6328_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mode_val = 1, \
+ }
+
+#define BCM6328_MUX_FUN(n, mux) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .mux_val = mux, \
+ }
+
+static const struct bcm6328_function bcm6328_funcs[] = {
+ BCM6328_MODE_FUN(led),
+ BCM6328_MUX_FUN(serial_led_data, 2),
+ BCM6328_MUX_FUN(serial_led_clk, 2),
+ BCM6328_MUX_FUN(inet_act_led, 1),
+ BCM6328_MUX_FUN(pcie_clkreq, 2),
+ BCM6328_MUX_FUN(ephy0_act_led, 1),
+ BCM6328_MUX_FUN(ephy1_act_led, 1),
+ BCM6328_MUX_FUN(ephy2_act_led, 1),
+ BCM6328_MUX_FUN(ephy3_act_led, 1),
+ BCM6328_MUX_FUN(hsspi_cs1, 2),
+ BCM6328_MUX_FUN(usb_host_port, 1),
+ BCM6328_MUX_FUN(usb_device_port, 2),
+};
+
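+/*
+ * Each mux register packs 16 pins, two bits per pin. For example, pin 36
+ * maps to bcm6328_mux[36 / 16] = BCM6328_MUX_OTHER_REG, with its function
+ * field at bits (36 % 16) * 2 = 8..9.
+ */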
+static inline unsigned int bcm6328_mux_off(unsigned int pin)
+{
+ return bcm6328_mux[pin / 16];
+}
+
+static int bcm6328_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6328_groups);
+}
+
+static const char *bcm6328_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6328_groups[group].name;
+}
+
+static int bcm6328_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6328_groups[group].pins;
+ *num_pins = bcm6328_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6328_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6328_funcs);
+}
+
+static const char *bcm6328_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6328_funcs[selector].name;
+}
+
+static int bcm6328_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6328_funcs[selector].groups;
+ *num_groups = bcm6328_funcs[selector].num_groups;
+
+ return 0;
+}
+
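+/*
+ * Pins below BCM6328_NUM_GPIOS have both a GPIO_MODE bit and a two-bit
+ * mux field; the extra pins (hsspi_cs1, usb_p2) only have the mux field.
+ */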
+static void bcm6328_rmw_mux(struct bcm63xx_pinctrl *pc, unsigned pin,
+ unsigned int mode, unsigned int mux)
+{
+ if (pin < BCM6328_NUM_GPIOS)
+ regmap_update_bits(pc->regs, BCM6328_MODE_REG, BIT(pin),
+ mode ? BIT(pin) : 0);
+
+ regmap_update_bits(pc->regs, bcm6328_mux_off(pin),
+ BCM6328_MUX_MASK << ((pin % 16) * 2),
+ mux << ((pin % 16) * 2));
+}
+
+static int bcm6328_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm6328_pingroup *pg = &bcm6328_groups[group];
+ const struct bcm6328_function *f = &bcm6328_funcs[selector];
+
+ bcm6328_rmw_mux(pc, pg->pins[0], f->mode_val, f->mux_val);
+
+ return 0;
+}
+
+static int bcm6328_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ bcm6328_rmw_mux(pc, offset, 0, 0);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6328_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6328_pinctrl_get_group_name,
+ .get_group_pins = bcm6328_pinctrl_get_group_pins,
+ .get_groups_count = bcm6328_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6328_pmx_ops = {
+ .get_function_groups = bcm6328_pinctrl_get_groups,
+ .get_function_name = bcm6328_pinctrl_get_func_name,
+ .get_functions_count = bcm6328_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6328_gpio_request_enable,
+ .set_mux = bcm6328_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6328_soc = {
+ .ngpios = BCM6328_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6328_pins),
+ .pctl_ops = &bcm6328_pctl_ops,
+ .pins = bcm6328_pins,
+ .pmx_ops = &bcm6328_pmx_ops,
+};
+
+static int bcm6328_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm6328_soc, NULL);
+}
+
+static const struct of_device_id bcm6328_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6328-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6328_pinctrl_driver = {
+ .probe = bcm6328_pinctrl_probe,
+ .driver = {
+ .name = "bcm6328-pinctrl",
+ .of_match_table = bcm6328_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6328_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6358.c b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
new file mode 100644
index 000000000000..d638578727f3
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6358 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6358_NUM_GPIOS 40
+
+#define BCM6358_MODE_REG 0x18
+#define BCM6358_MODE_MUX_NONE 0
+#define BCM6358_MODE_MUX_EBI_CS BIT(5)
+#define BCM6358_MODE_MUX_UART1 BIT(6)
+#define BCM6358_MODE_MUX_SPI_CS BIT(7)
+#define BCM6358_MODE_MUX_ASYNC_MODEM BIT(8)
+#define BCM6358_MODE_MUX_LEGACY_LED BIT(9)
+#define BCM6358_MODE_MUX_SERIAL_LED BIT(10)
+#define BCM6358_MODE_MUX_LED BIT(11)
+#define BCM6358_MODE_MUX_UTOPIA BIT(12)
+#define BCM6358_MODE_MUX_CLKRST BIT(13)
+#define BCM6358_MODE_MUX_PWM_SYN_CLK BIT(14)
+#define BCM6358_MODE_MUX_SYS_IRQ BIT(15)
+
+struct bcm6358_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+
+ const uint16_t mode_val;
+
+ /* non-GPIO function muxes require the gpio direction to be set */
+ const uint16_t direction;
+};
+
+struct bcm6358_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+};
+
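+/*
+ * Bits 0-15 of the mode register act as function overlays (see the
+ * "overlays" regmap_field set up in probe): setting a bit routes the
+ * associated pins to that function, clearing it returns them to GPIO.
+ */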
+struct bcm6358_priv {
+ struct regmap_field *overlays;
+};
+
+#define BCM6358_GPIO_PIN(a, b, bit1, bit2, bit3) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)(BCM6358_MODE_MUX_##bit1 | \
+ BCM6358_MODE_MUX_##bit2 | \
+ BCM6358_MODE_MUX_##bit3), \
+ }
+
+static const struct pinctrl_pin_desc bcm6358_pins[] = {
+ BCM6358_GPIO_PIN(0, "gpio0", LED, NONE, NONE),
+ BCM6358_GPIO_PIN(1, "gpio1", LED, NONE, NONE),
+ BCM6358_GPIO_PIN(2, "gpio2", LED, NONE, NONE),
+ BCM6358_GPIO_PIN(3, "gpio3", LED, NONE, NONE),
+ PINCTRL_PIN(4, "gpio4"),
+ BCM6358_GPIO_PIN(5, "gpio5", SYS_IRQ, NONE, NONE),
+ BCM6358_GPIO_PIN(6, "gpio6", SERIAL_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(7, "gpio7", SERIAL_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(8, "gpio8", PWM_SYN_CLK, NONE, NONE),
+ BCM6358_GPIO_PIN(9, "gpio9", LEGACY_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(10, "gpio10", LEGACY_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(11, "gpio11", LEGACY_LED, NONE, NONE),
+ BCM6358_GPIO_PIN(12, "gpio12", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ BCM6358_GPIO_PIN(13, "gpio13", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ BCM6358_GPIO_PIN(14, "gpio14", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ BCM6358_GPIO_PIN(15, "gpio15", LEGACY_LED, ASYNC_MODEM, UTOPIA),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ BCM6358_GPIO_PIN(22, "gpio22", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(23, "gpio23", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(24, "gpio24", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(25, "gpio25", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(26, "gpio26", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(27, "gpio27", UTOPIA, NONE, NONE),
+ BCM6358_GPIO_PIN(28, "gpio28", UTOPIA, UART1, NONE),
+ BCM6358_GPIO_PIN(29, "gpio29", UTOPIA, UART1, NONE),
+ BCM6358_GPIO_PIN(30, "gpio30", UTOPIA, UART1, EBI_CS),
+ BCM6358_GPIO_PIN(31, "gpio31", UTOPIA, UART1, EBI_CS),
+ BCM6358_GPIO_PIN(32, "gpio32", SPI_CS, NONE, NONE),
+ BCM6358_GPIO_PIN(33, "gpio33", SPI_CS, NONE, NONE),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+};
+
+static unsigned ebi_cs_grp_pins[] = { 30, 31 };
+
+static unsigned uart1_grp_pins[] = { 28, 29, 30, 31 };
+
+static unsigned spi_cs_grp_pins[] = { 32, 33 };
+
+static unsigned async_modem_grp_pins[] = { 12, 13, 14, 15 };
+
+static unsigned serial_led_grp_pins[] = { 6, 7 };
+
+static unsigned legacy_led_grp_pins[] = { 9, 10, 11, 12, 13, 14, 15 };
+
+static unsigned led_grp_pins[] = { 0, 1, 2, 3 };
+
+static unsigned utopia_grp_pins[] = {
+ 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static unsigned pwm_syn_clk_grp_pins[] = { 8 };
+
+static unsigned sys_irq_grp_pins[] = { 5 };
+
+#define BCM6358_GPIO_MUX_GROUP(n, bit, dir) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ .mode_val = BCM6358_MODE_MUX_##bit, \
+ .direction = dir, \
+ }
+
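+/*
+ * "dir" is a group-relative bitmask: bit i drives the group's i-th pin
+ * as an output when the group's function is selected.
+ */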
+static const struct bcm6358_pingroup bcm6358_groups[] = {
+ BCM6358_GPIO_MUX_GROUP(ebi_cs_grp, EBI_CS, 0x3),
+ BCM6358_GPIO_MUX_GROUP(uart1_grp, UART1, 0x2),
+ BCM6358_GPIO_MUX_GROUP(spi_cs_grp, SPI_CS, 0x6),
+ BCM6358_GPIO_MUX_GROUP(async_modem_grp, ASYNC_MODEM, 0x6),
+ BCM6358_GPIO_MUX_GROUP(legacy_led_grp, LEGACY_LED, 0x7f),
+ BCM6358_GPIO_MUX_GROUP(serial_led_grp, SERIAL_LED, 0x3),
+ BCM6358_GPIO_MUX_GROUP(led_grp, LED, 0xf),
+ BCM6358_GPIO_MUX_GROUP(utopia_grp, UTOPIA, 0xf),
+ BCM6358_GPIO_MUX_GROUP(pwm_syn_clk_grp, PWM_SYN_CLK, 0x1),
+ BCM6358_GPIO_MUX_GROUP(sys_irq_grp, SYS_IRQ, 0x1),
+};
+
+static const char * const ebi_cs_groups[] = {
+ "ebi_cs_grp"
+};
+
+static const char * const uart1_groups[] = {
+ "uart1_grp"
+};
+
+static const char * const spi_cs_2_3_groups[] = {
+ "spi_cs_2_3_grp"
+};
+
+static const char * const async_modem_groups[] = {
+ "async_modem_grp"
+};
+
+static const char * const legacy_led_groups[] = {
+ "legacy_led_grp",
+};
+
+static const char * const serial_led_groups[] = {
+ "serial_led_grp",
+};
+
+static const char * const led_groups[] = {
+ "led_grp",
+};
+
+static const char * const clkrst_groups[] = {
+ "clkrst_grp",
+};
+
+static const char * const pwm_syn_clk_groups[] = {
+ "pwm_syn_clk_grp",
+};
+
+static const char * const sys_irq_groups[] = {
+ "sys_irq_grp",
+};
+
+#define BCM6358_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ }
+
+static const struct bcm6358_function bcm6358_funcs[] = {
+ BCM6358_FUN(ebi_cs),
+ BCM6358_FUN(uart1),
+ BCM6358_FUN(spi_cs_2_3),
+ BCM6358_FUN(async_modem),
+ BCM6358_FUN(legacy_led),
+ BCM6358_FUN(serial_led),
+ BCM6358_FUN(led),
+ BCM6358_FUN(clkrst),
+ BCM6358_FUN(pwm_syn_clk),
+ BCM6358_FUN(sys_irq),
+};
+
+static int bcm6358_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6358_groups);
+}
+
+static const char *bcm6358_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6358_groups[group].name;
+}
+
+static int bcm6358_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6358_groups[group].pins;
+ *num_pins = bcm6358_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6358_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6358_funcs);
+}
+
+static const char *bcm6358_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6358_funcs[selector].name;
+}
+
+static int bcm6358_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6358_funcs[selector].groups;
+ *num_groups = bcm6358_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static int bcm6358_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6358_priv *priv = pc->driver_data;
+ const struct bcm6358_pingroup *pg = &bcm6358_groups[group];
+ unsigned int val = pg->mode_val;
+ unsigned int mask = val;
+ unsigned pin;
+
+ for (pin = 0; pin < pg->num_pins; pin++)
+ mask |= (unsigned long)bcm6358_pins[pg->pins[pin]].drv_data;
+
+ regmap_field_update_bits(priv->overlays, mask, val);
+
+ for (pin = 0; pin < pg->num_pins; pin++) {
+ struct pinctrl_gpio_range *range;
+ unsigned int hw_gpio = pg->pins[pin];
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, hw_gpio);
+ if (range) {
+ struct gpio_chip *gc = range->gc;
+
+ if (pg->direction & BIT(pin))
+ gc->direction_output(gc, hw_gpio, 0);
+ else
+ gc->direction_input(gc, hw_gpio);
+ }
+ }
+
+ return 0;
+}
+
+static int bcm6358_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6358_priv *priv = pc->driver_data;
+ unsigned int mask;
+
+ mask = (unsigned long) bcm6358_pins[offset].drv_data;
+ if (!mask)
+ return 0;
+
+ /* disable all functions using this pin */
+ return regmap_field_update_bits(priv->overlays, mask, 0);
+}
+
+static struct pinctrl_ops bcm6358_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6358_pinctrl_get_group_name,
+ .get_group_pins = bcm6358_pinctrl_get_group_pins,
+ .get_groups_count = bcm6358_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6358_pmx_ops = {
+ .get_function_groups = bcm6358_pinctrl_get_groups,
+ .get_function_name = bcm6358_pinctrl_get_func_name,
+ .get_functions_count = bcm6358_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6358_gpio_request_enable,
+ .set_mux = bcm6358_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6358_soc = {
+ .ngpios = BCM6358_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6358_pins),
+ .pctl_ops = &bcm6358_pctl_ops,
+ .pins = bcm6358_pins,
+ .pmx_ops = &bcm6358_pmx_ops,
+};
+
+static int bcm6358_pinctrl_probe(struct platform_device *pdev)
+{
+ struct reg_field overlays = REG_FIELD(BCM6358_MODE_REG, 0, 15);
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_pinctrl *pc;
+ struct bcm6358_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = bcm63xx_pinctrl_probe(pdev, &bcm6358_soc, (void *) priv);
+ if (err)
+ return err;
+
+ pc = platform_get_drvdata(pdev);
+
+ priv->overlays = devm_regmap_field_alloc(dev, pc->regs, overlays);
+ if (IS_ERR(priv->overlays))
+ return PTR_ERR(priv->overlays);
+
+ return 0;
+}
+
+static const struct of_device_id bcm6358_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6358-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6358_pinctrl_driver = {
+ .probe = bcm6358_pinctrl_probe,
+ .driver = {
+ .name = "bcm6358-pinctrl",
+ .of_match_table = bcm6358_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6358_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6362.c b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
new file mode 100644
index 000000000000..40ef495b6301
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6362.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6362 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6362_BANK_GPIOS 32
+#define BCM6362_NUM_GPIOS 48
+#define BCM6362_NUM_LEDS 24
+
+#define BCM6362_LED_REG 0x10
+#define BCM6362_MODE_REG 0x18
+#define BCM6362_CTRL_REG 0x1c
+#define BCM6362_BASEMODE_REG 0x38
+#define BASEMODE_NAND BIT(2)
+
+enum bcm6362_pinctrl_reg {
+ BCM6362_LEDCTRL,
+ BCM6362_MODE,
+ BCM6362_CTRL,
+ BCM6362_BASEMODE,
+};
+
+struct bcm6362_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6362_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ enum bcm6362_pinctrl_reg reg;
+ uint32_t basemode_mask;
+};
+
+#define BCM6362_PIN(a, b, mask) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)(mask), \
+ }
+
+static const struct pinctrl_pin_desc bcm6362_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ BCM6362_PIN(8, "gpio8", BASEMODE_NAND),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ BCM6362_PIN(12, "gpio12", BASEMODE_NAND),
+ BCM6362_PIN(13, "gpio13", BASEMODE_NAND),
+ BCM6362_PIN(14, "gpio14", BASEMODE_NAND),
+ BCM6362_PIN(15, "gpio15", BASEMODE_NAND),
+ BCM6362_PIN(16, "gpio16", BASEMODE_NAND),
+ BCM6362_PIN(17, "gpio17", BASEMODE_NAND),
+ BCM6362_PIN(18, "gpio18", BASEMODE_NAND),
+ BCM6362_PIN(19, "gpio19", BASEMODE_NAND),
+ BCM6362_PIN(20, "gpio20", BASEMODE_NAND),
+ BCM6362_PIN(21, "gpio21", BASEMODE_NAND),
+ BCM6362_PIN(22, "gpio22", BASEMODE_NAND),
+ BCM6362_PIN(23, "gpio23", BASEMODE_NAND),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ BCM6362_PIN(27, "gpio27", BASEMODE_NAND),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+ PINCTRL_PIN(38, "gpio38"),
+ PINCTRL_PIN(39, "gpio39"),
+ PINCTRL_PIN(40, "gpio40"),
+ PINCTRL_PIN(41, "gpio41"),
+ PINCTRL_PIN(42, "gpio42"),
+ PINCTRL_PIN(43, "gpio43"),
+ PINCTRL_PIN(44, "gpio44"),
+ PINCTRL_PIN(45, "gpio45"),
+ PINCTRL_PIN(46, "gpio46"),
+ PINCTRL_PIN(47, "gpio47"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned gpio32_pins[] = { 32 };
+static unsigned gpio33_pins[] = { 33 };
+static unsigned gpio34_pins[] = { 34 };
+static unsigned gpio35_pins[] = { 35 };
+static unsigned gpio36_pins[] = { 36 };
+static unsigned gpio37_pins[] = { 37 };
+static unsigned gpio38_pins[] = { 38 };
+static unsigned gpio39_pins[] = { 39 };
+static unsigned gpio40_pins[] = { 40 };
+static unsigned gpio41_pins[] = { 41 };
+static unsigned gpio42_pins[] = { 42 };
+static unsigned gpio43_pins[] = { 43 };
+static unsigned gpio44_pins[] = { 44 };
+static unsigned gpio45_pins[] = { 45 };
+static unsigned gpio46_pins[] = { 46 };
+static unsigned gpio47_pins[] = { 47 };
+
+static unsigned nand_grp_pins[] = {
+ 8, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 27,
+};
+
+#define BCM6362_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6362_pingroup bcm6362_groups[] = {
+ BCM6362_GROUP(gpio0),
+ BCM6362_GROUP(gpio1),
+ BCM6362_GROUP(gpio2),
+ BCM6362_GROUP(gpio3),
+ BCM6362_GROUP(gpio4),
+ BCM6362_GROUP(gpio5),
+ BCM6362_GROUP(gpio6),
+ BCM6362_GROUP(gpio7),
+ BCM6362_GROUP(gpio8),
+ BCM6362_GROUP(gpio9),
+ BCM6362_GROUP(gpio10),
+ BCM6362_GROUP(gpio11),
+ BCM6362_GROUP(gpio12),
+ BCM6362_GROUP(gpio13),
+ BCM6362_GROUP(gpio14),
+ BCM6362_GROUP(gpio15),
+ BCM6362_GROUP(gpio16),
+ BCM6362_GROUP(gpio17),
+ BCM6362_GROUP(gpio18),
+ BCM6362_GROUP(gpio19),
+ BCM6362_GROUP(gpio20),
+ BCM6362_GROUP(gpio21),
+ BCM6362_GROUP(gpio22),
+ BCM6362_GROUP(gpio23),
+ BCM6362_GROUP(gpio24),
+ BCM6362_GROUP(gpio25),
+ BCM6362_GROUP(gpio26),
+ BCM6362_GROUP(gpio27),
+ BCM6362_GROUP(gpio28),
+ BCM6362_GROUP(gpio29),
+ BCM6362_GROUP(gpio30),
+ BCM6362_GROUP(gpio31),
+ BCM6362_GROUP(gpio32),
+ BCM6362_GROUP(gpio33),
+ BCM6362_GROUP(gpio34),
+ BCM6362_GROUP(gpio35),
+ BCM6362_GROUP(gpio36),
+ BCM6362_GROUP(gpio37),
+ BCM6362_GROUP(gpio38),
+ BCM6362_GROUP(gpio39),
+ BCM6362_GROUP(gpio40),
+ BCM6362_GROUP(gpio41),
+ BCM6362_GROUP(gpio42),
+ BCM6362_GROUP(gpio43),
+ BCM6362_GROUP(gpio44),
+ BCM6362_GROUP(gpio45),
+ BCM6362_GROUP(gpio46),
+ BCM6362_GROUP(gpio47),
+ BCM6362_GROUP(nand_grp),
+};
+
+static const char * const led_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+};
+
+static const char * const usb_device_led_groups[] = {
+ "gpio0",
+};
+
+static const char * const sys_irq_groups[] = {
+ "gpio1",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio2",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio3",
+};
+
+static const char * const robosw_led_data_groups[] = {
+ "gpio4",
+};
+
+static const char * const robosw_led_clk_groups[] = {
+ "gpio5",
+};
+
+static const char * const robosw_led0_groups[] = {
+ "gpio6",
+};
+
+static const char * const robosw_led1_groups[] = {
+ "gpio7",
+};
+
+static const char * const inet_led_groups[] = {
+ "gpio8",
+};
+
+static const char * const spi_cs2_groups[] = {
+ "gpio9",
+};
+
+static const char * const spi_cs3_groups[] = {
+ "gpio10",
+};
+
+static const char * const ntr_pulse_groups[] = {
+ "gpio11",
+};
+
+static const char * const uart1_scts_groups[] = {
+ "gpio12",
+};
+
+static const char * const uart1_srts_groups[] = {
+ "gpio13",
+};
+
+static const char * const uart1_sdin_groups[] = {
+ "gpio14",
+};
+
+static const char * const uart1_sdout_groups[] = {
+ "gpio15",
+};
+
+static const char * const adsl_spi_miso_groups[] = {
+ "gpio16",
+};
+
+static const char * const adsl_spi_mosi_groups[] = {
+ "gpio17",
+};
+
+static const char * const adsl_spi_clk_groups[] = {
+ "gpio18",
+};
+
+static const char * const adsl_spi_cs_groups[] = {
+ "gpio19",
+};
+
+static const char * const ephy0_led_groups[] = {
+ "gpio20",
+};
+
+static const char * const ephy1_led_groups[] = {
+ "gpio21",
+};
+
+static const char * const ephy2_led_groups[] = {
+ "gpio22",
+};
+
+static const char * const ephy3_led_groups[] = {
+ "gpio23",
+};
+
+static const char * const ext_irq0_groups[] = {
+ "gpio24",
+};
+
+static const char * const ext_irq1_groups[] = {
+ "gpio25",
+};
+
+static const char * const ext_irq2_groups[] = {
+ "gpio26",
+};
+
+static const char * const ext_irq3_groups[] = {
+ "gpio27",
+};
+
+static const char * const wifi_groups[] = {
+ "gpio32",
+ "gpio33",
+ "gpio34",
+ "gpio35",
+ "gpio36",
+ "gpio37",
+ "gpio38",
+ "gpio39",
+ "gpio40",
+ "gpio41",
+ "gpio42",
+ "gpio43",
+ "gpio44",
+ "gpio45",
+ "gpio46",
+ "gpio47",
+};
+
+static const char * const nand_groups[] = {
+ "nand_grp",
+};
+
+#define BCM6362_LED_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_LEDCTRL, \
+ }
+
+#define BCM6362_MODE_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_MODE, \
+ }
+
+#define BCM6362_CTRL_FUN(n) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_CTRL, \
+ }
+
+#define BCM6362_BASEMODE_FUN(n, mask) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .reg = BCM6362_BASEMODE, \
+ .basemode_mask = (mask), \
+ }
+
+static const struct bcm6362_function bcm6362_funcs[] = {
+ BCM6362_LED_FUN(led),
+ BCM6362_MODE_FUN(usb_device_led),
+ BCM6362_MODE_FUN(sys_irq),
+ BCM6362_MODE_FUN(serial_led_clk),
+ BCM6362_MODE_FUN(serial_led_data),
+ BCM6362_MODE_FUN(robosw_led_data),
+ BCM6362_MODE_FUN(robosw_led_clk),
+ BCM6362_MODE_FUN(robosw_led0),
+ BCM6362_MODE_FUN(robosw_led1),
+ BCM6362_MODE_FUN(inet_led),
+ BCM6362_MODE_FUN(spi_cs2),
+ BCM6362_MODE_FUN(spi_cs3),
+ BCM6362_MODE_FUN(ntr_pulse),
+ BCM6362_MODE_FUN(uart1_scts),
+ BCM6362_MODE_FUN(uart1_srts),
+ BCM6362_MODE_FUN(uart1_sdin),
+ BCM6362_MODE_FUN(uart1_sdout),
+ BCM6362_MODE_FUN(adsl_spi_miso),
+ BCM6362_MODE_FUN(adsl_spi_mosi),
+ BCM6362_MODE_FUN(adsl_spi_clk),
+ BCM6362_MODE_FUN(adsl_spi_cs),
+ BCM6362_MODE_FUN(ephy0_led),
+ BCM6362_MODE_FUN(ephy1_led),
+ BCM6362_MODE_FUN(ephy2_led),
+ BCM6362_MODE_FUN(ephy3_led),
+ BCM6362_MODE_FUN(ext_irq0),
+ BCM6362_MODE_FUN(ext_irq1),
+ BCM6362_MODE_FUN(ext_irq2),
+ BCM6362_MODE_FUN(ext_irq3),
+ BCM6362_CTRL_FUN(wifi),
+ BCM6362_BASEMODE_FUN(nand, BASEMODE_NAND),
+};
+
+static int bcm6362_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6362_groups);
+}
+
+static const char *bcm6362_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6362_groups[group].name;
+}
+
+static int bcm6362_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6362_groups[group].pins;
+ *num_pins = bcm6362_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6362_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6362_funcs);
+}
+
+static const char *bcm6362_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6362_funcs[selector].name;
+}
+
+static int bcm6362_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6362_funcs[selector].groups;
+ *num_groups = bcm6362_funcs[selector].num_groups;
+
+ return 0;
+}
+
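+/*
+ * Return a pin to GPIO by unwinding every mux level that can claim it:
+ * the basemode overlay (if any), then the mode and LED registers for
+ * bank-0 pins, or the ctrl register for the wifi pins in the second bank.
+ */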
+static void bcm6362_set_gpio(struct bcm63xx_pinctrl *pc, unsigned pin)
+{
+ const struct pinctrl_pin_desc *desc = &bcm6362_pins[pin];
+ unsigned int basemode = (uintptr_t)desc->drv_data;
+ unsigned int mask = bcm63xx_bank_pin(pin);
+
+ if (basemode)
+ regmap_update_bits(pc->regs, BCM6362_BASEMODE_REG, basemode, 0);
+
+ if (pin < BCM63XX_BANK_GPIOS) {
+ /* mode reg: 0 => gpio, 1 => mux function */
+ regmap_update_bits(pc->regs, BCM6362_MODE_REG, mask, 0);
+
+ /* pins 0-23 might be muxed to led */
+ if (pin < BCM6362_NUM_LEDS)
+ regmap_update_bits(pc->regs, BCM6362_LED_REG, mask, 0);
+ } else {
+ /* ctrl reg: 0 => wifi function, 1 => gpio */
+ regmap_update_bits(pc->regs, BCM6362_CTRL_REG, mask, mask);
+ }
+}
+
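+/*
+ * LED/MODE/CTRL functions are enabled per pin, keyed off the group's
+ * first (and only) pin; BASEMODE functions use the function's shared
+ * basemode_mask instead.
+ */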
+static int bcm6362_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct bcm6362_pingroup *pg = &bcm6362_groups[group];
+ const struct bcm6362_function *f = &bcm6362_funcs[selector];
+ unsigned i;
+ unsigned int reg;
+ unsigned int val, mask;
+
+ for (i = 0; i < pg->num_pins; i++)
+ bcm6362_set_gpio(pc, pg->pins[i]);
+
+ switch (f->reg) {
+ case BCM6362_LEDCTRL:
+ reg = BCM6362_LED_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM6362_MODE:
+ reg = BCM6362_MODE_REG;
+ mask = BIT(pg->pins[0]);
+ val = BIT(pg->pins[0]);
+ break;
+ case BCM6362_CTRL:
+ reg = BCM6362_CTRL_REG;
+ mask = BIT(pg->pins[0]);
+ val = 0;
+ break;
+ case BCM6362_BASEMODE:
+ reg = BCM6362_BASEMODE_REG;
+ mask = f->basemode_mask;
+ val = f->basemode_mask;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pc->regs, reg, mask, val);
+
+ return 0;
+}
+
+static int bcm6362_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable all functions using this pin */
+ bcm6362_set_gpio(pc, offset);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6362_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6362_pinctrl_get_group_name,
+ .get_group_pins = bcm6362_pinctrl_get_group_pins,
+ .get_groups_count = bcm6362_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6362_pmx_ops = {
+ .get_function_groups = bcm6362_pinctrl_get_groups,
+ .get_function_name = bcm6362_pinctrl_get_func_name,
+ .get_functions_count = bcm6362_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6362_gpio_request_enable,
+ .set_mux = bcm6362_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6362_soc = {
+ .ngpios = BCM6362_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6362_pins),
+ .pctl_ops = &bcm6362_pctl_ops,
+ .pins = bcm6362_pins,
+ .pmx_ops = &bcm6362_pmx_ops,
+};
+
+static int bcm6362_pinctrl_probe(struct platform_device *pdev)
+{
+ return bcm63xx_pinctrl_probe(pdev, &bcm6362_soc, NULL);
+}
+
+static const struct of_device_id bcm6362_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6362-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6362_pinctrl_driver = {
+ .probe = bcm6362_pinctrl_probe,
+ .driver = {
+ .name = "bcm6362-pinctrl",
+ .of_match_table = bcm6362_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6362_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6368.c b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
new file mode 100644
index 000000000000..838095f9e890
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6368.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM6368 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../pinctrl-utils.h"
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM6368_NUM_GPIOS 38
+
+#define BCM6368_MODE_REG 0x18
+#define BCM6368_BASEMODE_REG 0x38
+#define BCM6368_BASEMODE_MASK 0x7
+#define BCM6368_BASEMODE_GPIO 0x0
+#define BCM6368_BASEMODE_UART1 0x1
+
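+/*
+ * gpio30-gpio33 are additionally claimed by the basemode overlay:
+ * writing BCM6368_BASEMODE_UART1 hands them to UART1, and
+ * BCM6368_BASEMODE_GPIO returns them to the normal GPIO/mode mux.
+ */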
+struct bcm6368_pingroup {
+ const char *name;
+ const unsigned * const pins;
+ const unsigned num_pins;
+};
+
+struct bcm6368_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+
+ unsigned dir_out:16;
+ unsigned basemode:3;
+};
+
+struct bcm6368_priv {
+ struct regmap_field *overlays;
+};
+
+#define BCM6368_BASEMODE_PIN(a, b) \
+ { \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)true \
+ }
+
+static const struct pinctrl_pin_desc bcm6368_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ BCM6368_BASEMODE_PIN(30, "gpio30"),
+ BCM6368_BASEMODE_PIN(31, "gpio31"),
+ BCM6368_BASEMODE_PIN(32, "gpio32"),
+ BCM6368_BASEMODE_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+ PINCTRL_PIN(35, "gpio35"),
+ PINCTRL_PIN(36, "gpio36"),
+ PINCTRL_PIN(37, "gpio37"),
+};
+
+static unsigned gpio0_pins[] = { 0 };
+static unsigned gpio1_pins[] = { 1 };
+static unsigned gpio2_pins[] = { 2 };
+static unsigned gpio3_pins[] = { 3 };
+static unsigned gpio4_pins[] = { 4 };
+static unsigned gpio5_pins[] = { 5 };
+static unsigned gpio6_pins[] = { 6 };
+static unsigned gpio7_pins[] = { 7 };
+static unsigned gpio8_pins[] = { 8 };
+static unsigned gpio9_pins[] = { 9 };
+static unsigned gpio10_pins[] = { 10 };
+static unsigned gpio11_pins[] = { 11 };
+static unsigned gpio12_pins[] = { 12 };
+static unsigned gpio13_pins[] = { 13 };
+static unsigned gpio14_pins[] = { 14 };
+static unsigned gpio15_pins[] = { 15 };
+static unsigned gpio16_pins[] = { 16 };
+static unsigned gpio17_pins[] = { 17 };
+static unsigned gpio18_pins[] = { 18 };
+static unsigned gpio19_pins[] = { 19 };
+static unsigned gpio20_pins[] = { 20 };
+static unsigned gpio21_pins[] = { 21 };
+static unsigned gpio22_pins[] = { 22 };
+static unsigned gpio23_pins[] = { 23 };
+static unsigned gpio24_pins[] = { 24 };
+static unsigned gpio25_pins[] = { 25 };
+static unsigned gpio26_pins[] = { 26 };
+static unsigned gpio27_pins[] = { 27 };
+static unsigned gpio28_pins[] = { 28 };
+static unsigned gpio29_pins[] = { 29 };
+static unsigned gpio30_pins[] = { 30 };
+static unsigned gpio31_pins[] = { 31 };
+static unsigned uart1_grp_pins[] = { 30, 31, 32, 33 };
+
+#define BCM6368_GROUP(n) \
+ { \
+ .name = #n, \
+ .pins = n##_pins, \
+ .num_pins = ARRAY_SIZE(n##_pins), \
+ }
+
+static struct bcm6368_pingroup bcm6368_groups[] = {
+ BCM6368_GROUP(gpio0),
+ BCM6368_GROUP(gpio1),
+ BCM6368_GROUP(gpio2),
+ BCM6368_GROUP(gpio3),
+ BCM6368_GROUP(gpio4),
+ BCM6368_GROUP(gpio5),
+ BCM6368_GROUP(gpio6),
+ BCM6368_GROUP(gpio7),
+ BCM6368_GROUP(gpio8),
+ BCM6368_GROUP(gpio9),
+ BCM6368_GROUP(gpio10),
+ BCM6368_GROUP(gpio11),
+ BCM6368_GROUP(gpio12),
+ BCM6368_GROUP(gpio13),
+ BCM6368_GROUP(gpio14),
+ BCM6368_GROUP(gpio15),
+ BCM6368_GROUP(gpio16),
+ BCM6368_GROUP(gpio17),
+ BCM6368_GROUP(gpio18),
+ BCM6368_GROUP(gpio19),
+ BCM6368_GROUP(gpio20),
+ BCM6368_GROUP(gpio21),
+ BCM6368_GROUP(gpio22),
+ BCM6368_GROUP(gpio23),
+ BCM6368_GROUP(gpio24),
+ BCM6368_GROUP(gpio25),
+ BCM6368_GROUP(gpio26),
+ BCM6368_GROUP(gpio27),
+ BCM6368_GROUP(gpio28),
+ BCM6368_GROUP(gpio29),
+ BCM6368_GROUP(gpio30),
+ BCM6368_GROUP(gpio31),
+ BCM6368_GROUP(uart1_grp),
+};
+
+static const char * const analog_afe_0_groups[] = {
+ "gpio0",
+};
+
+static const char * const analog_afe_1_groups[] = {
+ "gpio1",
+};
+
+static const char * const sys_irq_groups[] = {
+ "gpio2",
+};
+
+static const char * const serial_led_data_groups[] = {
+ "gpio3",
+};
+
+static const char * const serial_led_clk_groups[] = {
+ "gpio4",
+};
+
+static const char * const inet_led_groups[] = {
+ "gpio5",
+};
+
+static const char * const ephy0_led_groups[] = {
+ "gpio6",
+};
+
+static const char * const ephy1_led_groups[] = {
+ "gpio7",
+};
+
+static const char * const ephy2_led_groups[] = {
+ "gpio8",
+};
+
+static const char * const ephy3_led_groups[] = {
+ "gpio9",
+};
+
+static const char * const robosw_led_data_groups[] = {
+ "gpio10",
+};
+
+static const char * const robosw_led_clk_groups[] = {
+ "gpio11",
+};
+
+static const char * const robosw_led0_groups[] = {
+ "gpio12",
+};
+
+static const char * const robosw_led1_groups[] = {
+ "gpio13",
+};
+
+static const char * const usb_device_led_groups[] = {
+ "gpio14",
+};
+
+static const char * const pci_req1_groups[] = {
+ "gpio16",
+};
+
+static const char * const pci_gnt1_groups[] = {
+ "gpio17",
+};
+
+static const char * const pci_intb_groups[] = {
+ "gpio18",
+};
+
+static const char * const pci_req0_groups[] = {
+ "gpio19",
+};
+
+static const char * const pci_gnt0_groups[] = {
+ "gpio20",
+};
+
+static const char * const pcmcia_cd1_groups[] = {
+ "gpio22",
+};
+
+static const char * const pcmcia_cd2_groups[] = {
+ "gpio23",
+};
+
+static const char * const pcmcia_vs1_groups[] = {
+ "gpio24",
+};
+
+static const char * const pcmcia_vs2_groups[] = {
+ "gpio25",
+};
+
+static const char * const ebi_cs2_groups[] = {
+ "gpio26",
+};
+
+static const char * const ebi_cs3_groups[] = {
+ "gpio27",
+};
+
+static const char * const spi_cs2_groups[] = {
+ "gpio28",
+};
+
+static const char * const spi_cs3_groups[] = {
+ "gpio29",
+};
+
+static const char * const spi_cs4_groups[] = {
+ "gpio30",
+};
+
+static const char * const spi_cs5_groups[] = {
+ "gpio31",
+};
+
+static const char * const uart1_groups[] = {
+ "uart1_grp",
+};
+
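+/*
+ * "out" works like the bcm6358 "dir" field: bit i of the mask makes the
+ * group's i-th pin an output once the function is applied.
+ */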
+#define BCM6368_FUN(n, out) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .dir_out = out, \
+ }
+
+#define BCM6368_BASEMODE_FUN(n, val, out) \
+ { \
+ .name = #n, \
+ .groups = n##_groups, \
+ .num_groups = ARRAY_SIZE(n##_groups), \
+ .basemode = BCM6368_BASEMODE_##val, \
+ .dir_out = out, \
+ }
+
+static const struct bcm6368_function bcm6368_funcs[] = {
+ BCM6368_FUN(analog_afe_0, 1),
+ BCM6368_FUN(analog_afe_1, 1),
+ BCM6368_FUN(sys_irq, 1),
+ BCM6368_FUN(serial_led_data, 1),
+ BCM6368_FUN(serial_led_clk, 1),
+ BCM6368_FUN(inet_led, 1),
+ BCM6368_FUN(ephy0_led, 1),
+ BCM6368_FUN(ephy1_led, 1),
+ BCM6368_FUN(ephy2_led, 1),
+ BCM6368_FUN(ephy3_led, 1),
+ BCM6368_FUN(robosw_led_data, 1),
+ BCM6368_FUN(robosw_led_clk, 1),
+ BCM6368_FUN(robosw_led0, 1),
+ BCM6368_FUN(robosw_led1, 1),
+ BCM6368_FUN(usb_device_led, 1),
+ BCM6368_FUN(pci_req1, 0),
+ BCM6368_FUN(pci_gnt1, 0),
+ BCM6368_FUN(pci_intb, 0),
+ BCM6368_FUN(pci_req0, 0),
+ BCM6368_FUN(pci_gnt0, 0),
+ BCM6368_FUN(pcmcia_cd1, 0),
+ BCM6368_FUN(pcmcia_cd2, 0),
+ BCM6368_FUN(pcmcia_vs1, 0),
+ BCM6368_FUN(pcmcia_vs2, 0),
+ BCM6368_FUN(ebi_cs2, 1),
+ BCM6368_FUN(ebi_cs3, 1),
+ BCM6368_FUN(spi_cs2, 1),
+ BCM6368_FUN(spi_cs3, 1),
+ BCM6368_FUN(spi_cs4, 1),
+ BCM6368_FUN(spi_cs5, 1),
+ BCM6368_BASEMODE_FUN(uart1, UART1, 0x6),
+};
+
+static int bcm6368_pinctrl_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6368_groups);
+}
+
+static const char *bcm6368_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return bcm6368_groups[group].name;
+}
+
+static int bcm6368_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = bcm6368_groups[group].pins;
+ *num_pins = bcm6368_groups[group].num_pins;
+
+ return 0;
+}
+
+static int bcm6368_pinctrl_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(bcm6368_funcs);
+}
+
+static const char *bcm6368_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return bcm6368_funcs[selector].name;
+}
+
+static int bcm6368_pinctrl_get_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *groups = bcm6368_funcs[selector].groups;
+ *num_groups = bcm6368_funcs[selector].num_groups;
+
+ return 0;
+}
+
+static int bcm6368_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned selector, unsigned group)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6368_priv *priv = pc->driver_data;
+ const struct bcm6368_pingroup *pg = &bcm6368_groups[group];
+ const struct bcm6368_function *fun = &bcm6368_funcs[selector];
+ int i, pin;
+
+ if (fun->basemode) {
+ unsigned int mask = 0;
+
+ for (i = 0; i < pg->num_pins; i++) {
+ pin = pg->pins[i];
+ if (pin < BCM63XX_BANK_GPIOS)
+ mask |= BIT(pin);
+ }
+
+ regmap_update_bits(pc->regs, BCM6368_MODE_REG, mask, 0);
+ regmap_field_write(priv->overlays, fun->basemode);
+ } else {
+ pin = pg->pins[0];
+
+ if (bcm6368_pins[pin].drv_data)
+ regmap_field_write(priv->overlays,
+ BCM6368_BASEMODE_GPIO);
+
+ regmap_update_bits(pc->regs, BCM6368_MODE_REG, BIT(pin),
+ BIT(pin));
+ }
+
+ for (pin = 0; pin < pg->num_pins; pin++) {
+ struct pinctrl_gpio_range *range;
+ int hw_gpio = pg->pins[pin];
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, hw_gpio);
+ if (range) {
+ struct gpio_chip *gc = range->gc;
+
+ if (fun->dir_out & BIT(pin))
+ gc->direction_output(gc, hw_gpio, 0);
+ else
+ gc->direction_input(gc, hw_gpio);
+ }
+ }
+
+ return 0;
+}
+
+static int bcm6368_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct bcm63xx_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ struct bcm6368_priv *priv = pc->driver_data;
+
+ if (offset >= BCM63XX_BANK_GPIOS && !bcm6368_pins[offset].drv_data)
+ return 0;
+
+ /* disable all functions using this pin */
+ if (offset < BCM63XX_BANK_GPIOS)
+ regmap_update_bits(pc->regs, BCM6368_MODE_REG, BIT(offset), 0);
+
+ if (bcm6368_pins[offset].drv_data)
+ regmap_field_write(priv->overlays, BCM6368_BASEMODE_GPIO);
+
+ return 0;
+}
+
+static struct pinctrl_ops bcm6368_pctl_ops = {
+ .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .get_group_name = bcm6368_pinctrl_get_group_name,
+ .get_group_pins = bcm6368_pinctrl_get_group_pins,
+ .get_groups_count = bcm6368_pinctrl_get_group_count,
+};
+
+static struct pinmux_ops bcm6368_pmx_ops = {
+ .get_function_groups = bcm6368_pinctrl_get_groups,
+ .get_function_name = bcm6368_pinctrl_get_func_name,
+ .get_functions_count = bcm6368_pinctrl_get_func_count,
+ .gpio_request_enable = bcm6368_gpio_request_enable,
+ .set_mux = bcm6368_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct bcm63xx_pinctrl_soc bcm6368_soc = {
+ .ngpios = BCM6368_NUM_GPIOS,
+ .npins = ARRAY_SIZE(bcm6368_pins),
+ .pctl_ops = &bcm6368_pctl_ops,
+ .pins = bcm6368_pins,
+ .pmx_ops = &bcm6368_pmx_ops,
+};
+
+static int bcm6368_pinctrl_probe(struct platform_device *pdev)
+{
+ struct reg_field overlays = REG_FIELD(BCM6368_BASEMODE_REG, 0, 15);
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_pinctrl *pc;
+ struct bcm6368_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = bcm63xx_pinctrl_probe(pdev, &bcm6368_soc, (void *) priv);
+ if (err)
+ return err;
+
+ pc = platform_get_drvdata(pdev);
+
+ priv->overlays = devm_regmap_field_alloc(dev, pc->regs, overlays);
+ if (IS_ERR(priv->overlays))
+ return PTR_ERR(priv->overlays);
+
+ return 0;
+}
+
+static const struct of_device_id bcm6368_pinctrl_match[] = {
+ { .compatible = "brcm,bcm6368-pinctrl", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm6368_pinctrl_driver = {
+ .probe = bcm6368_pinctrl_probe,
+ .driver = {
+ .name = "bcm6368-pinctrl",
+ .of_match_table = bcm6368_pinctrl_match,
+ },
+};
+
+builtin_platform_driver(bcm6368_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.c b/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
new file mode 100644
index 000000000000..e1285fe2fbc0
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for BCM63xx GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/gpio/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-bcm63xx.h"
+
+#define BCM63XX_BANK_SIZE 4
+
+#define BCM63XX_DIROUT_REG 0x04
+#define BCM63XX_DATA_REG 0x0c
+
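+/*
+ * Banks sit at descending addresses: bank 0 owns the highest register,
+ * so the register for a GPIO is found by stepping down from the base.
+ * For example, GPIO 35 (bank 1, line 3) uses base - BCM63XX_BANK_SIZE,
+ * bit 3.
+ */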
+static int bcm63xx_reg_mask_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ unsigned int line = offset % BCM63XX_BANK_GPIOS;
+ unsigned int stride = offset / BCM63XX_BANK_GPIOS;
+
+ *reg = base - stride * BCM63XX_BANK_SIZE;
+ *mask = BIT(line);
+
+ return 0;
+}
+
+static const struct of_device_id bcm63xx_gpio_of_match[] = {
+ { .compatible = "brcm,bcm6318-gpio", },
+ { .compatible = "brcm,bcm6328-gpio", },
+ { .compatible = "brcm,bcm6358-gpio", },
+ { .compatible = "brcm,bcm6362-gpio", },
+ { .compatible = "brcm,bcm6368-gpio", },
+ { .compatible = "brcm,bcm63268-gpio", },
+ { /* sentinel */ }
+};
+
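+/*
+ * Register a gpio_regmap chip on top of the shared pinctrl regmap. The
+ * data register serves both reads (reg_dat_base) and writes
+ * (reg_set_base); direction comes from the dirout register.
+ */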
+static int bcm63xx_gpio_probe(struct device *dev, struct device_node *node,
+ const struct bcm63xx_pinctrl_soc *soc,
+ struct bcm63xx_pinctrl *pc)
+{
+ struct gpio_regmap_config grc = {0};
+
+ grc.parent = dev;
+ grc.fwnode = &node->fwnode;
+ grc.ngpio = soc->ngpios;
+ grc.ngpio_per_reg = BCM63XX_BANK_GPIOS;
+ grc.regmap = pc->regs;
+ grc.reg_dat_base = BCM63XX_DATA_REG;
+ grc.reg_dir_out_base = BCM63XX_DIROUT_REG;
+ grc.reg_set_base = BCM63XX_DATA_REG;
+ grc.reg_mask_xlate = bcm63xx_reg_mask_xlate;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &grc));
+}
+
+int bcm63xx_pinctrl_probe(struct platform_device *pdev,
+ const struct bcm63xx_pinctrl_soc *soc,
+ void *driver_data)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_pinctrl *pc;
+ struct device_node *node;
+ int err;
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pc);
+
+ pc->dev = dev;
+ pc->driver_data = driver_data;
+
+ pc->regs = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(pc->regs))
+ return PTR_ERR(pc->regs);
+
+ pc->pctl_desc.name = dev_name(dev);
+ pc->pctl_desc.pins = soc->pins;
+ pc->pctl_desc.npins = soc->npins;
+ pc->pctl_desc.pctlops = soc->pctl_ops;
+ pc->pctl_desc.pmxops = soc->pmx_ops;
+ pc->pctl_desc.owner = THIS_MODULE;
+
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+ if (IS_ERR(pc->pctl_dev))
+ return PTR_ERR(pc->pctl_dev);
+
+ for_each_child_of_node(dev->parent->of_node, node) {
+ if (of_match_node(bcm63xx_gpio_of_match, node)) {
+ err = bcm63xx_gpio_probe(dev, node, soc, pc);
+ if (err) {
+ dev_err(dev, "could not add GPIO chip\n");
+ of_node_put(node);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.h b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
new file mode 100644
index 000000000000..3bdb50021f1b
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2016 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#ifndef __PINCTRL_BCM63XX_H__
+#define __PINCTRL_BCM63XX_H__
+
+#include <linux/pinctrl/pinctrl.h>
+
+#define BCM63XX_BANK_GPIOS 32
+
+struct bcm63xx_pinctrl_soc {
+ struct pinctrl_ops *pctl_ops;
+ struct pinmux_ops *pmx_ops;
+
+ const struct pinctrl_pin_desc *pins;
+ unsigned npins;
+
+ unsigned int ngpios;
+};
+
+struct bcm63xx_pinctrl {
+ struct device *dev;
+ struct regmap *regs;
+
+ struct pinctrl_desc pctl_desc;
+ struct pinctrl_dev *pctl_dev;
+
+ void *driver_data;
+};
+
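+/* Line index of @pin within its 32-pin bank. */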
+static inline unsigned int bcm63xx_bank_pin(unsigned int pin)
+{
+ return pin % BCM63XX_BANK_GPIOS;
+}
+
+int bcm63xx_pinctrl_probe(struct platform_device *pdev,
+ const struct bcm63xx_pinctrl_soc *soc,
+ void *driver_data);
+
+#endif /* __PINCTRL_BCM63XX_H__ */
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6e6825d17a1d..a4ac87c8b4f8 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -160,7 +160,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
}
/**
- * pin_get_name_from_id() - look up a pin name from a pin id
+ * pin_get_name() - look up a pin name from a pin id
* @pctldev: the pin control device to lookup the pin on
* @pin: pin number/id to look up
*/
@@ -1258,7 +1258,7 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
p->state = NULL;
- /* Apply all the settings for the new state */
+ /* Apply all the settings for the new state - pinmux first */
list_for_each_entry(setting, &state->settings, node) {
switch (setting->type) {
case PIN_MAP_TYPE_MUX_GROUP:
@@ -1266,6 +1266,29 @@ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
break;
case PIN_MAP_TYPE_CONFIGS_PIN:
case PIN_MAP_TYPE_CONFIGS_GROUP:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0)
+ goto unapply_new_state;
+
+ /* Do not link hogs (circular dependency) */
+ if (p != setting->pctldev->p)
+ pinctrl_link_add(setting->pctldev, p->dev);
+ }
+
+ /* Apply all the settings for the new state - pinconf after */
+ list_for_each_entry(setting, &state->settings, node) {
+ switch (setting->type) {
+ case PIN_MAP_TYPE_MUX_GROUP:
+ ret = 0;
+ break;
+ case PIN_MAP_TYPE_CONFIGS_PIN:
+ case PIN_MAP_TYPE_CONFIGS_GROUP:
ret = pinconf_apply_setting(setting);
break;
default:
@@ -1892,11 +1915,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
dev_name(pctldev->dev));
return;
}
- debugfs_create_file("pins", S_IFREG | S_IRUGO,
+ debugfs_create_file("pins", 0444,
device_root, pctldev, &pinctrl_pins_fops);
- debugfs_create_file("pingroups", S_IFREG | S_IRUGO,
+ debugfs_create_file("pingroups", 0444,
device_root, pctldev, &pinctrl_groups_fops);
- debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
+ debugfs_create_file("gpio-ranges", 0444,
device_root, pctldev, &pinctrl_gpioranges_fops);
if (pctldev->desc->pmxops)
pinmux_init_device_debugfs(device_root, pctldev);
@@ -1918,11 +1941,11 @@ static void pinctrl_init_debugfs(void)
return;
}
- debugfs_create_file("pinctrl-devices", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinctrl-devices", 0444,
debugfs_root, NULL, &pinctrl_devices_fops);
- debugfs_create_file("pinctrl-maps", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinctrl-maps", 0444,
debugfs_root, NULL, &pinctrl_maps_fops);
- debugfs_create_file("pinctrl-handles", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinctrl-handles", 0444,
debugfs_root, NULL, &pinctrl_fops);
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index faf770f13bc7..1e2b0fe9ffd6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -262,6 +262,7 @@ static struct platform_driver imx1_pinctrl_driver = {
.driver = {
.name = "imx1-pinctrl",
.of_match_table = imx1_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index a899a398b6bb..51748da1668f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -323,7 +323,8 @@ static int imx25_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx25_pinctrl_driver = {
.driver = {
.name = "imx25-pinctrl",
- .of_match_table = of_match_ptr(imx25_pinctrl_of_match),
+ .of_match_table = imx25_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx25_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index b4dfc1676cbc..67e7105be4f3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -396,7 +396,8 @@ static int imx27_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx27_pinctrl_driver = {
.driver = {
.name = "imx27-pinctrl",
- .of_match_table = of_match_ptr(imx27_pinctrl_of_match),
+ .of_match_table = imx27_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx27_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 871bb419e2f0..c8671ad5214c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1014,6 +1014,7 @@ static struct platform_driver imx35_pinctrl_driver = {
.driver = {
.name = "imx35-pinctrl",
.of_match_table = imx35_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx35_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index cf182c040e0b..a245b4011c00 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -399,7 +399,8 @@ static int imx50_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx50_pinctrl_driver = {
.driver = {
.name = "imx50-pinctrl",
- .of_match_table = of_match_ptr(imx50_pinctrl_of_match),
+ .of_match_table = imx50_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx50_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index e5c261e2bf1e..307cf5fe4d15 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -776,6 +776,7 @@ static struct platform_driver imx51_pinctrl_driver = {
.driver = {
.name = "imx51-pinctrl",
.of_match_table = imx51_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx51_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 64c97aaf20c7..02bf3bda69ac 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -463,6 +463,7 @@ static struct platform_driver imx53_pinctrl_driver = {
.driver = {
.name = "imx53-pinctrl",
.of_match_table = imx53_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx53_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 0858b4d79ed2..2b6d5141a477 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -473,6 +473,7 @@ static struct platform_driver imx6dl_pinctrl_driver = {
.driver = {
.name = "imx6dl-pinctrl",
.of_match_table = imx6dl_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6dl_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 078ed6a331fd..a7507def26a9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -475,6 +475,7 @@ static struct platform_driver imx6q_pinctrl_driver = {
.driver = {
.name = "imx6q-pinctrl",
.of_match_table = imx6q_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6q_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index 9d2e6f987aa7..236f3bf120c2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -379,6 +379,7 @@ static struct platform_driver imx6sl_pinctrl_driver = {
.driver = {
.name = "imx6sl-pinctrl",
.of_match_table = imx6sl_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6sl_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sll.c b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
index 0618f4d887fd..dfefcecbe072 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sll.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
@@ -345,7 +345,7 @@ static int imx6sll_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6sll_pinctrl_driver = {
.driver = {
.name = "imx6sll-pinctrl",
- .of_match_table = of_match_ptr(imx6sll_pinctrl_of_match),
+ .of_match_table = imx6sll_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx6sll_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index c7e2b1f94f01..b7b97c274dcc 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -382,7 +382,8 @@ static int imx6sx_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6sx_pinctrl_driver = {
.driver = {
.name = "imx6sx-pinctrl",
- .of_match_table = of_match_ptr(imx6sx_pinctrl_of_match),
+ .of_match_table = imx6sx_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6sx_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 7e37627c63f5..3b8747482e36 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -342,7 +342,8 @@ static int imx6ul_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6ul_pinctrl_driver = {
.driver = {
.name = "imx6ul-pinctrl",
- .of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
+ .of_match_table = imx6ul_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx6ul_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 369d3e59fdd6..4126387344cb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -386,7 +386,8 @@ static int imx7d_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx7d_pinctrl_driver = {
.driver = {
.name = "imx7d-pinctrl",
- .of_match_table = of_match_ptr(imx7d_pinctrl_of_match),
+ .of_match_table = imx7d_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx7d_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
index 922ff73c7087..1915378d92b2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
@@ -303,7 +303,7 @@ static int imx7ulp_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx7ulp_pinctrl_driver = {
.driver = {
.name = "imx7ulp-pinctrl",
- .of_match_table = of_match_ptr(imx7ulp_pinctrl_of_match),
+ .of_match_table = imx7ulp_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx7ulp_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
index d3020c0cd55d..041455c13d0d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
@@ -184,7 +184,7 @@ static int imx8dxl_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8dxl_pinctrl_driver = {
.driver = {
.name = "fsl,imx8dxl-iomuxc",
- .of_match_table = of_match_ptr(imx8dxl_pinctrl_of_match),
+ .of_match_table = imx8dxl_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8dxl_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mm.c b/drivers/pinctrl/freescale/pinctrl-imx8mm.c
index 31c5d8861406..39dc73281ce6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mm.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mm.c
@@ -337,7 +337,7 @@ static int imx8mm_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mm_pinctrl_driver = {
.driver = {
.name = "imx8mm-pinctrl",
- .of_match_table = of_match_ptr(imx8mm_pinctrl_of_match),
+ .of_match_table = imx8mm_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8mm_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mn.c b/drivers/pinctrl/freescale/pinctrl-imx8mn.c
index 14c9deb51fec..448a79eb4568 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mn.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mn.c
@@ -337,7 +337,7 @@ static int imx8mn_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mn_pinctrl_driver = {
.driver = {
.name = "imx8mn-pinctrl",
- .of_match_table = of_match_ptr(imx8mn_pinctrl_of_match),
+ .of_match_table = imx8mn_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8mn_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
index bf4bbb5e2446..88abc257318f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
@@ -335,7 +335,8 @@ static int imx8mp_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mp_pinctrl_driver = {
.driver = {
.name = "imx8mp-pinctrl",
- .of_match_table = of_match_ptr(imx8mp_pinctrl_of_match),
+ .of_match_table = imx8mp_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = imx8mp_pinctrl_probe,
};
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
index ae3ea5b5c204..3ed3c98bcedb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -340,7 +340,7 @@ static int imx8mq_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8mq_pinctrl_driver = {
.driver = {
.name = "imx8mq-pinctrl",
- .of_match_table = of_match_ptr(imx8mq_pinctrl_of_match),
+ .of_match_table = imx8mq_pinctrl_of_match,
.pm = &imx_pinctrl_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qm.c b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
index 8f46b9404cd7..2e2d30dc13f7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qm.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
@@ -317,7 +317,7 @@ static int imx8qm_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8qm_pinctrl_driver = {
.driver = {
.name = "imx8qm-pinctrl",
- .of_match_table = of_match_ptr(imx8qm_pinctrl_of_match),
+ .of_match_table = imx8qm_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8qm_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
index 6776ad6a3a27..4f97813ba8b7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
@@ -223,7 +223,7 @@ static int imx8qxp_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx8qxp_pinctrl_driver = {
.driver = {
.name = "imx8qxp-pinctrl",
- .of_match_table = of_match_ptr(imx8qxp_pinctrl_of_match),
+ .of_match_table = imx8qxp_pinctrl_of_match,
.suppress_bind_attrs = true,
},
.probe = imx8qxp_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 37602b053ed2..700e5a136814 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -336,6 +336,7 @@ static struct platform_driver vf610_pinctrl_driver = {
.driver = {
.name = "vf610-pinctrl",
.of_match_table = vf610_pinctrl_of_match,
+ .suppress_bind_attrs = true,
},
.probe = vf610_pinctrl_probe,
};
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 9f3361c13ded..85750974d182 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1173,16 +1173,15 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
for (gpp = 0; gpp < community->ngpps; gpp++) {
const struct intel_padgroup *padgrp = &community->gpps[gpp];
unsigned long pending, enabled, gpp_offset;
- unsigned long flags;
- raw_spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock(&pctrl->lock);
pending = readl(community->regs + community->is_offset +
padgrp->reg_num * 4);
enabled = readl(community->regs + community->ie_offset +
padgrp->reg_num * 4);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock(&pctrl->lock);
/* Only interrupts that are enabled */
pending &= enabled;
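
The hunk above replaces the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore()
pair with plain raw_spin_lock()/raw_spin_unlock(). That is safe only because
intel_gpio_community_irq_handler() runs in hard-IRQ context, where local
interrupts are already disabled, so saving and restoring the flags is pure
overhead. A minimal sketch of the pattern, using hypothetical demo_* names
rather than the driver's own symbols:

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define DEMO_IS_OFFSET	0x100	/* hypothetical status register offset */

    struct demo_ctx {
    	raw_spinlock_t lock;
    	void __iomem *regs;
    };

    /*
     * Called only from hard-IRQ context: local IRQs are already off,
     * so the plain lock is sufficient and there are no flags to save.
     */
    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
    	struct demo_ctx *ctx = data;
    	u32 pending;

    	raw_spin_lock(&ctx->lock);
    	pending = readl(ctx->regs + DEMO_IS_OFFSET);
    	raw_spin_unlock(&ctx->lock);

    	return pending ? IRQ_HANDLED : IRQ_NONE;
    }

The same shortcut would deadlock in process context, where an interrupt
taking the same lock could arrive on the same CPU; there the irqsave
variant remains necessary.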
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index eef17f228669..90f0c8255eaf 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -147,6 +147,12 @@ config PINCTRL_MT8192
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8195
+ bool "Mediatek MT8195 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ select PINCTRL_MTK_PARIS
+
config PINCTRL_MT8516
bool "Mediatek MT8516 pin control"
depends on OF
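
The new PINCTRL_MT8195 entry above follows the pattern of its neighbours:
a bool symbol (so the driver is always built in, never modular), gated on
OF and ARM64 || COMPILE_TEST, and selecting the shared Paris pinctrl core.
A hypothetical defconfig fragment enabling it would be just:

    CONFIG_PINCTRL_MT8195=y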
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 01218bf4dc30..06fde993ace2 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
+obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 0fa7de43bc4c..3a4a23c40a71 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -619,6 +619,8 @@ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
hw->nbase = hw->soc->nbase_names;
+ spin_lock_init(&hw->lock);
+
/* Copy from internal struct mtk_pin_desc to register to the core */
pins = devm_kmalloc_array(&pdev->dev, hw->soc->npins, sizeof(*pins),
GFP_KERNEL);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
new file mode 100644
index 000000000000..a7500e18bb1d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#include "pinctrl-mtk-mt8195.h"
+#include "pinctrl-paris.h"
+
+/* MT8195 has multiple register bases for programming the pin configuration,
+ * listed below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x11d10000, iocfg[2]:0x11d30000,
+ * iocfg[3]:0x11d40000, iocfg[4]:0x11e20000, iocfg[5]:0x11eb0000,
+ * iocfg[6]:0x11f40000.
+ * The i_base argument indicates which base a pin is mapped into.
+ */
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt8195_pin_mode_range[] = {
+ PIN_FIELD(0, 144, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_dir_range[] = {
+ PIN_FIELD(0, 144, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_di_range[] = {
+ PIN_FIELD(0, 144, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_do_range[] = {
+ PIN_FIELD(0, 144, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x040, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x040, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x040, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x040, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x040, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x040, 0x10, 5, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x040, 0x10, 6, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x040, 0x10, 7, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x040, 0x10, 13, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x040, 0x10, 8, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x040, 0x10, 14, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x040, 0x10, 9, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x040, 0x10, 15, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x040, 0x10, 10, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x040, 0x10, 16, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x040, 0x10, 11, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x040, 0x10, 17, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x040, 0x10, 12, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x040, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x040, 0x10, 12, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x040, 0x10, 11, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x040, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x040, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x040, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x040, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x040, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x040, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x040, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x040, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x040, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x040, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x060, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x060, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x060, 0x10, 11, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x060, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x060, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x070, 0x10, 3, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x070, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x070, 0x10, 4, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x070, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x070, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x070, 0x10, 7, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x070, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x070, 0x10, 9, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x070, 0x10, 20, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x070, 0x10, 21, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x060, 0x10, 18, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x060, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x060, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x060, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x060, 0x10, 25, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x060, 0x10, 20, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x060, 0x10, 26, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x060, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x060, 0x10, 22, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x060, 0x10, 23, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x060, 0x10, 24, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x060, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x060, 0x10, 27, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x060, 0x10, 30, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x060, 0x10, 28, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x060, 0x10, 8, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x060, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x060, 0x10, 10, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x060, 0x10, 9, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x070, 0x10, 1, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x060, 0x10, 31, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x070, 0x10, 0, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x070, 0x10, 2, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x060, 0x10, 6, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x060, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x060, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x060, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x060, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x060, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x070, 0x10, 11, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x030, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x030, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x030, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x030, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x030, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x030, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x030, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x030, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x030, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x030, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x030, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x030, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x030, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x030, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x030, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x030, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x030, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x030, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x030, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x030, 0x10, 0, 1),
+ PIN_FIELD_BASE(97, 97, 3, 0x030, 0x10, 20, 1),
+ PIN_FIELD_BASE(98, 98, 3, 0x030, 0x10, 28, 1),
+ PIN_FIELD_BASE(99, 99, 3, 0x030, 0x10, 27, 1),
+ PIN_FIELD_BASE(100, 100, 3, 0x030, 0x10, 30, 1),
+ PIN_FIELD_BASE(101, 101, 3, 0x030, 0x10, 29, 1),
+ PIN_FIELD_BASE(102, 102, 3, 0x040, 0x10, 0, 1),
+ PIN_FIELD_BASE(103, 103, 3, 0x030, 0x10, 31, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x030, 0x10, 25, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x030, 0x10, 26, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x030, 0x10, 23, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x030, 0x10, 24, 1),
+ PIN_FIELD_BASE(108, 108, 3, 0x030, 0x10, 22, 1),
+ PIN_FIELD_BASE(109, 109, 3, 0x030, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x010, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x010, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x010, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x010, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x010, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x030, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x030, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x030, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x030, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x030, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x030, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x030, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x030, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x030, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x030, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x030, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x030, 0x10, 10, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x040, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 3, 0x040, 0x10, 1, 1),
+ PIN_FIELD_BASE(130, 130, 3, 0x040, 0x10, 4, 1),
+ PIN_FIELD_BASE(131, 131, 3, 0x040, 0x10, 2, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x030, 0x10, 13, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x030, 0x10, 12, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x030, 0x10, 15, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x030, 0x10, 14, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x070, 0x10, 13, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x070, 0x10, 12, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x070, 0x10, 15, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x070, 0x10, 14, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x070, 0x10, 17, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x070, 0x10, 16, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x070, 0x10, 19, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x070, 0x10, 18, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0d0, 0x10, 5, 1),
+ PINS_FIELD_BASE(6, 7, 4, 0x0d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x0d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x0d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x0d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x0d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x0d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x0d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x0d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x0d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x090, 0x10, 11, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x090, 0x10, 10, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x090, 0x10, 9, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x090, 0x10, 11, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x090, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x090, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x090, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x090, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x090, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x090, 0x10, 5, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x090, 0x10, 6, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x090, 0x10, 7, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x090, 0x10, 8, 1),
+ PINS_FIELD_BASE(31, 33, 1, 0x0f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0f0, 0x10, 2, 1),
+ PINS_FIELD_BASE(38, 39, 1, 0x0f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x0f0, 0x10, 16, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x0f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x0f0, 0x10, 25, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0f0, 0x10, 26, 1),
+ PINS_FIELD_BASE(46, 47, 1, 0x0f0, 0x10, 5, 1),
+ PINS_FIELD_BASE(48, 51, 1, 0x0f0, 0x10, 6, 1),
+ PINS_FIELD_BASE(52, 55, 1, 0x0f0, 0x10, 7, 1),
+ PINS_FIELD_BASE(56, 59, 1, 0x0f0, 0x10, 8, 1),
+ PINS_FIELD_BASE(60, 63, 1, 0x0f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x0f0, 0x10, 10, 1),
+ PINS_FIELD_BASE(65, 68, 1, 0x0f0, 0x10, 3, 1),
+ PINS_FIELD_BASE(69, 71, 1, 0x0f0, 0x10, 10, 1),
+ PINS_FIELD_BASE(72, 75, 1, 0x0f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0e0, 0x10, 11, 1),
+ PINS_FIELD_BASE(85, 88, 3, 0x0e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0e0, 0x10, 13, 1),
+ PINS_FIELD_BASE(95, 98, 3, 0x0e0, 0x10, 15, 1),
+ PINS_FIELD_BASE(99, 102, 3, 0x0e0, 0x10, 16, 1),
+ PINS_FIELD_BASE(103, 104, 3, 0x0e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0e0, 0x10, 18, 1),
+ PINS_FIELD_BASE(106, 107, 3, 0x0e0, 0x10, 17, 1),
+ PINS_FIELD_BASE(108, 109, 3, 0x0e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x070, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x070, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x070, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x070, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x070, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x070, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x0e0, 0x10, 18, 1),
+ PINS_FIELD_BASE(129, 131, 3, 0x0e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x0c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x0c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x0c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x0c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0f0, 0x10, 18, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0f0, 0x10, 17, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0f0, 0x10, 20, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0f0, 0x10, 19, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0f0, 0x10, 22, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0f0, 0x10, 21, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0f0, 0x10, 24, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0f0, 0x10, 23, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_pu_range[] = {
+ PIN_FIELD_BASE(6, 6, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x00a0, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x00a0, 0x10, 30, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x00a0, 0x10, 28, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(97, 97, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(99, 99, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 3, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(101, 101, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(102, 102, 3, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(103, 103, 3, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(108, 108, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 3, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(129, 129, 3, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(130, 130, 3, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(131, 131, 3, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x00b0, 0x10, 19, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_pd_range[] = {
+ PIN_FIELD_BASE(6, 6, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(10, 10, 4, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(12, 12, 4, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(14, 14, 4, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(15, 15, 4, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(16, 16, 4, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(17, 17, 4, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(39, 39, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(41, 41, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(42, 42, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(43, 43, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(44, 44, 1, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(57, 57, 1, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(58, 58, 1, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(59, 59, 1, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(60, 60, 1, 0x0080, 0x10, 28, 1),
+ PIN_FIELD_BASE(61, 61, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(62, 62, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(63, 63, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(64, 64, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(65, 65, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(66, 66, 1, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(67, 67, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(68, 68, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(69, 69, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(70, 70, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(71, 71, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(72, 72, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(74, 74, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(97, 97, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(99, 99, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(101, 101, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(102, 102, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(103, 103, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(108, 108, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(128, 128, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(129, 129, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(130, 130, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(131, 131, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 6, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 6, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(134, 134, 6, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(135, 135, 6, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(137, 137, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(138, 138, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0090, 0x10, 19, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_pupd_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0060, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x0060, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0050, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_r0_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0070, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_r1_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(89, 89, 3, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(91, 91, 3, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(92, 92, 3, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(93, 93, 3, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(94, 94, 3, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(95, 95, 3, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(96, 96, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 3, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(105, 105, 3, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 3, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 3, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(116, 116, 6, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(117, 117, 6, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(125, 125, 6, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(126, 126, 6, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(127, 127, 6, 0x0080, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(1, 1, 4, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(2, 2, 4, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(3, 3, 4, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(4, 4, 4, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(5, 5, 4, 0x000, 0x10, 15, 3),
+ PINS_FIELD_BASE(6, 7, 4, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
+ PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
+ PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
+ PIN_FIELD_BASE(17, 17, 4, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(18, 18, 2, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(19, 19, 2, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(20, 20, 2, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(21, 21, 2, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(22, 22, 2, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(23, 23, 2, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(24, 24, 2, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(25, 25, 2, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(26, 26, 2, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(27, 27, 2, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(28, 28, 2, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x000, 0x10, 24, 3),
+ PINS_FIELD_BASE(31, 33, 1, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x000, 0x10, 24, 3),
+ PIN_FIELD_BASE(36, 36, 1, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(37, 37, 1, 0x010, 0x10, 21, 3),
+ PINS_FIELD_BASE(38, 39, 1, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(40, 40, 1, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(41, 41, 1, 0x010, 0x10, 24, 3),
+ PIN_FIELD_BASE(42, 42, 1, 0x020, 0x10, 3, 3),
+ PIN_FIELD_BASE(43, 43, 1, 0x020, 0x10, 0, 3),
+ PIN_FIELD_BASE(44, 44, 1, 0x030, 0x10, 0, 3),
+ PIN_FIELD_BASE(45, 45, 1, 0x030, 0x10, 3, 3),
+ PINS_FIELD_BASE(46, 47, 1, 0x010, 0x10, 3, 3),
+ PINS_FIELD_BASE(48, 51, 1, 0x010, 0x10, 6, 3),
+ PINS_FIELD_BASE(52, 55, 1, 0x010, 0x10, 9, 3),
+ PINS_FIELD_BASE(56, 59, 1, 0x010, 0x10, 12, 3),
+ PINS_FIELD_BASE(60, 63, 1, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(64, 64, 1, 0x010, 0x10, 18, 3),
+ PINS_FIELD_BASE(65, 68, 1, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(69, 69, 1, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(70, 70, 1, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(71, 71, 1, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(72, 72, 1, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(73, 73, 1, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(74, 74, 1, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(75, 75, 1, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(76, 76, 1, 0x010, 0x10, 18, 3),
+ PIN_FIELD_BASE(77, 77, 3, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+ PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
+ PINS_FIELD_BASE(85, 88, 3, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(89, 89, 3, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(90, 90, 3, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(91, 91, 3, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(92, 92, 3, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(93, 93, 3, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(94, 94, 3, 0x010, 0x10, 9, 3),
+ PINS_FIELD_BASE(95, 98, 3, 0x010, 0x10, 18, 3),
+ PINS_FIELD_BASE(99, 102, 3, 0x010, 0x10, 21, 3),
+ PINS_FIELD_BASE(103, 104, 3, 0x010, 0x10, 24, 3),
+ PIN_FIELD_BASE(105, 105, 3, 0x010, 0x10, 27, 3),
+ PINS_FIELD_BASE(106, 107, 3, 0x010, 0x10, 24, 3),
+ PINS_FIELD_BASE(108, 109, 3, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(110, 110, 5, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(111, 111, 5, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(112, 112, 5, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(113, 113, 5, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(114, 114, 5, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(115, 115, 5, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(116, 116, 6, 0x000, 0x10, 27, 3),
+ PIN_FIELD_BASE(117, 117, 6, 0x000, 0x10, 24, 3),
+ PIN_FIELD_BASE(118, 118, 6, 0x000, 0x10, 21, 3),
+ PIN_FIELD_BASE(119, 119, 6, 0x000, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 6, 0x010, 0x10, 3, 3),
+ PIN_FIELD_BASE(121, 121, 6, 0x000, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 6, 0x000, 0x10, 0, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x000, 0x10, 15, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x000, 0x10, 12, 3),
+ PIN_FIELD_BASE(125, 125, 6, 0x000, 0x10, 9, 3),
+ PIN_FIELD_BASE(126, 126, 6, 0x000, 0x10, 6, 3),
+ PIN_FIELD_BASE(127, 127, 6, 0x010, 0x10, 0, 3),
+ PIN_FIELD_BASE(128, 128, 3, 0x010, 0x10, 27, 3),
+ PINS_FIELD_BASE(129, 130, 3, 0x020, 0x10, 0, 3),
+ PINS_FIELD_BASE(131, 131, 3, 0x010, 0x10, 12, 3),
+ PIN_FIELD_BASE(132, 132, 6, 0x010, 0x10, 9, 3),
+ PIN_FIELD_BASE(133, 133, 6, 0x010, 0x10, 6, 3),
+ PIN_FIELD_BASE(134, 134, 6, 0x010, 0x10, 15, 3),
+ PIN_FIELD_BASE(135, 135, 6, 0x010, 0x10, 12, 3),
+ PIN_FIELD_BASE(136, 136, 1, 0x020, 0x10, 9, 3),
+ PIN_FIELD_BASE(137, 137, 1, 0x020, 0x10, 6, 3),
+ PIN_FIELD_BASE(138, 138, 1, 0x020, 0x10, 15, 3),
+ PIN_FIELD_BASE(139, 139, 1, 0x020, 0x10, 12, 3),
+ PIN_FIELD_BASE(140, 140, 1, 0x020, 0x10, 21, 3),
+ PIN_FIELD_BASE(141, 141, 1, 0x020, 0x10, 18, 3),
+ PIN_FIELD_BASE(142, 142, 1, 0x020, 0x10, 27, 3),
+ PIN_FIELD_BASE(143, 143, 1, 0x020, 0x10, 24, 3),
+};
+
+static const struct mtk_pin_field_calc mt8195_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(8, 8, 4, 0x020, 0x10, 15, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x020, 0x10, 0, 3),
+ PIN_FIELD_BASE(10, 10, 4, 0x020, 0x10, 18, 3),
+ PIN_FIELD_BASE(11, 11, 4, 0x020, 0x10, 3, 3),
+ PIN_FIELD_BASE(12, 12, 4, 0x020, 0x10, 21, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x020, 0x10, 6, 3),
+ PIN_FIELD_BASE(14, 14, 4, 0x020, 0x10, 24, 3),
+ PIN_FIELD_BASE(15, 15, 4, 0x020, 0x10, 9, 3),
+ PIN_FIELD_BASE(16, 16, 4, 0x020, 0x10, 27, 3),
+ PIN_FIELD_BASE(17, 17, 4, 0x020, 0x10, 12, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x020, 0x10, 0, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x020, 0x10, 3, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x040, 0x10, 0, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x040, 0x10, 3, 3),
+ PIN_FIELD_BASE(44, 44, 1, 0x040, 0x10, 6, 3),
+ PIN_FIELD_BASE(45, 45, 1, 0x040, 0x10, 9, 3),
+};
+
+static const struct mtk_pin_reg_calc mt8195_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8195_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8195_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8195_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8195_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8195_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8195_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8195_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8195_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8195_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8195_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8195_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8195_pin_r1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8195_pin_drv_adv_range),
+};
+
+static const char * const mt8195_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg_bm", "iocfg_bl", "iocfg_br", "iocfg_lm",
+ "iocfg_rb", "iocfg_tl",
+};
+
+static const struct mtk_eint_hw mt8195_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 7,
+ .ap_num = 225,
+ .db_cnt = 32,
+};
+
+static const struct mtk_pin_soc mt8195_data = {
+ .reg_cal = mt8195_reg_cals,
+ .pins = mtk_pins_mt8195,
+ .npins = ARRAY_SIZE(mtk_pins_mt8195),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8195),
+ .eint_hw = &mt8195_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8195_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8195_pinctrl_register_base_names),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+};
+
+static const struct of_device_id mt8195_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8195-pinctrl", },
+ { }
+};
+
+static int mt8195_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_paris_pinctrl_probe(pdev, &mt8195_data);
+}
+
+static struct platform_driver mt8195_pinctrl_driver = {
+ .driver = {
+ .name = "mt8195-pinctrl",
+ .of_match_table = mt8195_pinctrl_of_match,
+ },
+ .probe = mt8195_pinctrl_probe,
+};
+
+static int __init mt8195_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8195_pinctrl_driver);
+}
+arch_initcall(mt8195_pinctrl_init);
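
For reading the mt8195_pin_*_range tables above: a non-fixed entry
PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits)
places each pin n in [s_pin, e_pin] into register base iocfg[i_base],
packing one x_bits-wide field per pin starting at s_bit and spilling into
the next register (stride x_addrs) once a 32-bit word fills up, while a
PINS_FIELD_BASE entry makes every pin in the range share the single field
at s_addr/s_bit. A sketch of the implied arithmetic, with a hypothetical
demo_locate_field() helper (the core's real lookup additionally handles
fields that straddle a register boundary):

    /* Map a pin number to (register offset, bit position) within its base. */
    static void demo_locate_field(unsigned int pin, unsigned int s_pin,
    			      unsigned int s_addr, unsigned int x_addrs,
    			      unsigned int s_bit, unsigned int x_bits,
    			      unsigned int *offset, unsigned int *bitpos)
    {
    	unsigned int bits = s_bit + (pin - s_pin) * x_bits;

    	*offset = s_addr + (bits / 32) * x_addrs;
    	*bitpos = bits % 32;
    }

For example, in mt8195_pin_drv_range, PIN_FIELD_BASE(8, 8, 4, 0x010, 0x10,
6, 3) puts pin 8's 3-bit drive-strength field at offset 0x010, bits 6..8,
of base iocfg[4].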
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 72f17f26acd8..5b3b048725cc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -57,11 +57,16 @@ static u32 mtk_r32(struct mtk_pinctrl *pctl, u8 i, u32 reg)
void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set)
{
u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctl->lock, flags);
val = mtk_r32(pctl, i, reg);
val &= ~mask;
val |= set;
mtk_w32(pctl, i, reg, val);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw,
@@ -1027,6 +1032,20 @@ int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
}
EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get);
+int mtk_pinconf_adv_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV_ADV, arg);
+}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_set_raw);
+
+int mtk_pinconf_adv_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val)
+{
+ return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV_ADV, val);
+}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get_raw);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("Pin configuration library module for mediatek SoCs");
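
The mtk_rmw() change above closes a read-modify-write race: two callers
updating different bits of the same register could previously both read
the old value, and the later writeback would silently drop the earlier
update. The new spinlock (initialised in mtk_moore_pinctrl_probe() above
and declared in struct mtk_pinctrl in the header change below) serialises
the whole sequence. A minimal sketch of the locked RMW pattern, with
hypothetical demo_* names:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    struct demo_pctl {		/* stand-in for struct mtk_pinctrl */
    	spinlock_t lock;
    	void __iomem *base;
    };

    static void demo_rmw(struct demo_pctl *pctl, u32 reg, u32 mask, u32 set)
    {
    	unsigned long flags;
    	u32 val;

    	/* irqsave variant: callers may run in any context */
    	spin_lock_irqsave(&pctl->lock, flags);
    	val = readl(pctl->base + reg);
    	val &= ~mask;
    	val |= set;
    	writel(val, pctl->base + reg);
    	spin_unlock_irqrestore(&pctl->lock, flags);
    }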
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index e2aae285b5fc..a6f1bdb2083b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -66,6 +66,7 @@ enum {
PINCTRL_PIN_REG_DRV_EN,
PINCTRL_PIN_REG_DRV_E0,
PINCTRL_PIN_REG_DRV_E1,
+ PINCTRL_PIN_REG_DRV_ADV,
PINCTRL_PIN_REG_MAX,
};
@@ -251,6 +252,8 @@ struct mtk_pinctrl {
struct mtk_eint *eint;
struct mtk_pinctrl_group *groups;
const char **grp_names;
+	/* protect the pin register resources from concurrent access */
+ spinlock_t lock;
};
void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set);
@@ -314,6 +317,10 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg);
int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 *val);
+int mtk_pinconf_adv_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_adv_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *val);
bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n);
#endif /* __PINCTRL_MTK_COMMON_V2_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h
new file mode 100644
index 000000000000..de4a8a80bf1d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8195.h
@@ -0,0 +1,1669 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8195_H
+#define __PINCTRL_MTK_MT8195_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt8195[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "MSDC2_CMD"),
+ MTK_FUNCTION(3, "TDMIN_MCK"),
+ MTK_FUNCTION(4, "CLKM0"),
+ MTK_FUNCTION(5, "PERSTN_1"),
+ MTK_FUNCTION(6, "IDDIG_1P"),
+ MTK_FUNCTION(7, "DMIC4_CLK")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "MSDC2_CLK"),
+ MTK_FUNCTION(3, "TDMIN_DI"),
+ MTK_FUNCTION(4, "CLKM1"),
+ MTK_FUNCTION(5, "CLKREQN_1"),
+ MTK_FUNCTION(6, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(7, "DMIC4_DAT")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT3"),
+ MTK_FUNCTION(3, "TDMIN_LRCK"),
+ MTK_FUNCTION(4, "CLKM2"),
+ MTK_FUNCTION(5, "WAKEN_1"),
+ MTK_FUNCTION(7, "DMIC2_CLK")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT0"),
+ MTK_FUNCTION(3, "TDMIN_BCK"),
+ MTK_FUNCTION(4, "CLKM3"),
+ MTK_FUNCTION(7, "DMIC2_DAT")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT2"),
+ MTK_FUNCTION(3, "SPDIF_IN1"),
+ MTK_FUNCTION(4, "UTXD3"),
+ MTK_FUNCTION(5, "SDA2"),
+ MTK_FUNCTION(7, "IDDIG_2P")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "MSDC2_DAT1"),
+ MTK_FUNCTION(3, "SPDIF_IN0"),
+ MTK_FUNCTION(4, "URXD3"),
+ MTK_FUNCTION(5, "SCL2"),
+ MTK_FUNCTION(7, "USB_DRVVBUS_2P")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "I2SO1_D4"),
+ MTK_FUNCTION(4, "UTXD4"),
+ MTK_FUNCTION(5, "CMVREF3"),
+ MTK_FUNCTION(7, "DMIC3_CLK")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(3, "I2SO1_D5"),
+ MTK_FUNCTION(4, "URXD4"),
+ MTK_FUNCTION(5, "CMVREF4"),
+ MTK_FUNCTION(7, "DMIC3_DAT")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(4, "SPDIF_OUT"),
+ MTK_FUNCTION(6, "LVTS_FOUT"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(4, "IR_IN"),
+ MTK_FUNCTION(6, "LVTS_SDO"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "ADSP_URXD0"),
+ MTK_FUNCTION(4, "SPDIF_IN1"),
+ MTK_FUNCTION(6, "LVTS_SCF"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "ADSP_UTXD0"),
+ MTK_FUNCTION(4, "SPDIF_IN0"),
+ MTK_FUNCTION(6, "LVTS_SCK"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "DMIC3_DAT_R"),
+ MTK_FUNCTION(3, "I2SO1_D6"),
+ MTK_FUNCTION(6, "LVTS_SDI"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "DMIC4_DAT_R"),
+ MTK_FUNCTION(3, "I2SO1_D7"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "DMIC3_DAT"),
+ MTK_FUNCTION(3, "TDMIN_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "DMIC3_CLK"),
+ MTK_FUNCTION(3, "TDMIN_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(2, "DMIC4_DAT"),
+ MTK_FUNCTION(3, "TDMIN_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(2, "DMIC4_CLK"),
+ MTK_FUNCTION(3, "TDMIN_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DP_TX_HPD")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "WAKEN"),
+ MTK_FUNCTION(2, "SCP_SDA1"),
+ MTK_FUNCTION(3, "MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "SDA6")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "PERSTN"),
+ MTK_FUNCTION(2, "SCP_SCL1"),
+ MTK_FUNCTION(3, "MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "SCL6")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "CLKREQN"),
+ MTK_FUNCTION(3, "MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "PERSTN_1"),
+ MTK_FUNCTION(5, "SCP_SCL1"),
+ MTK_FUNCTION(7, "MD32_0_GPIO0")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "CLKREQN_1"),
+ MTK_FUNCTION(3, "SDA4"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "SCP_SDA0"),
+ MTK_FUNCTION(7, "MD32_0_GPIO1")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "WAKEN_1"),
+ MTK_FUNCTION(3, "SCL4"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "SCP_SCL0"),
+ MTK_FUNCTION(6, "LVTS_26M"),
+ MTK_FUNCTION(7, "MD32_0_GPIO2")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "CMMRST"),
+ MTK_FUNCTION(2, "CMMCLK3"),
+ MTK_FUNCTION(3, "SPDIF_OUT"),
+ MTK_FUNCTION(4, "SDA6"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "MD32_0_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "CMMPDN"),
+ MTK_FUNCTION(2, "CMMCLK4"),
+ MTK_FUNCTION(3, "IR_IN"),
+ MTK_FUNCTION(4, "SCL6"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "MD32_0_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "HDMIRX20_HTPLG"),
+ MTK_FUNCTION(2, "CMFLASH0"),
+ MTK_FUNCTION(3, "MD32_0_TXD"),
+ MTK_FUNCTION(4, "TP_UTXD2_AO"),
+ MTK_FUNCTION(5, "SCL7"),
+ MTK_FUNCTION(6, "UCTS2"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "HDMIRX20_PWR5V"),
+ MTK_FUNCTION(2, "CMFLASH1"),
+ MTK_FUNCTION(3, "MD32_0_RXD"),
+ MTK_FUNCTION(4, "TP_URXD2_AO"),
+ MTK_FUNCTION(5, "SDA7"),
+ MTK_FUNCTION(6, "URTS2"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "HDMIRX20_SCL"),
+ MTK_FUNCTION(2, "CMFLASH2"),
+ MTK_FUNCTION(3, "SCL5"),
+ MTK_FUNCTION(4, "TP_URTS2_AO"),
+ MTK_FUNCTION(6, "UTXD2"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "HDMIRX20_SDA"),
+ MTK_FUNCTION(2, "CMFLASH3"),
+ MTK_FUNCTION(3, "SDA5"),
+ MTK_FUNCTION(4, "TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "URXD2"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "HDMITX20_PWR5V"),
+ MTK_FUNCTION(2, "DMIC1_DAT_R"),
+ MTK_FUNCTION(3, "PERSTN"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "HDMITX20_HTPLG"),
+ MTK_FUNCTION(3, "CLKREQN"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "HDMITX20_CEC"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "WAKEN")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "HDMITX20_SCL"),
+ MTK_FUNCTION(2, "CMVREF1"),
+ MTK_FUNCTION(3, "SCL7"),
+ MTK_FUNCTION(4, "SCL6"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "HDMITX20_SDA"),
+ MTK_FUNCTION(2, "CMVREF2"),
+ MTK_FUNCTION(3, "SDA7"),
+ MTK_FUNCTION(4, "SDA6"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "RTC32K_CK"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "WATCHDOG"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "SRCLKENA0"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "SRCLKENA1"),
+ MTK_FUNCTION(2, "DMIC2_DAT_R"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN"),
+ MTK_FUNCTION(3, "SPIM3_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK"),
+ MTK_FUNCTION(3, "SPIM3_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(3, "SPIM3_MO"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(3, "SPIM3_MI"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "SPMI_M_SCL"),
+ MTK_FUNCTION(2, "I2SI00_DATA1"),
+ MTK_FUNCTION(3, "SCL5"),
+ MTK_FUNCTION(4, "UTXD5"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "SPMI_M_SDA"),
+ MTK_FUNCTION(2, "I2SI00_DATA2"),
+ MTK_FUNCTION(3, "SDA5"),
+ MTK_FUNCTION(4, "URXD5"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "I2SIN_MCK"),
+ MTK_FUNCTION(2, "I2SI00_DATA3"),
+ MTK_FUNCTION(3, "SPLIN_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "I2SIN_BCK"),
+ MTK_FUNCTION(2, "I2SIN0_BCK"),
+ MTK_FUNCTION(3, "SPLIN_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "I2SIN_WS"),
+ MTK_FUNCTION(2, "I2SIN0_LRCK"),
+ MTK_FUNCTION(3, "SPLIN_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "I2SIN_D0"),
+ MTK_FUNCTION(2, "I2SI00_DATA0"),
+ MTK_FUNCTION(3, "SPLIN_D0"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "I2SO1_MCK"),
+ MTK_FUNCTION(2, "I2SI5_D0"),
+ MTK_FUNCTION(4, "I2SO4_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "I2SO1_BCK"),
+ MTK_FUNCTION(2, "I2SI5_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "I2SO1_WS"),
+ MTK_FUNCTION(2, "I2SI5_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "I2SO1_D0"),
+ MTK_FUNCTION(2, "I2SI5_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "I2SO1_D1"),
+ MTK_FUNCTION(2, "I2SI01_DATA1"),
+ MTK_FUNCTION(3, "SPLIN_D1"),
+ MTK_FUNCTION(4, "I2SO4_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "I2SO1_D2"),
+ MTK_FUNCTION(2, "I2SI01_DATA2"),
+ MTK_FUNCTION(3, "SPLIN_D2"),
+ MTK_FUNCTION(4, "I2SO4_WS"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "I2SO1_D3"),
+ MTK_FUNCTION(2, "I2SI01_DATA3"),
+ MTK_FUNCTION(3, "SPLIN_D3"),
+ MTK_FUNCTION(4, "I2SO4_D0"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "I2SO2_MCK"),
+ MTK_FUNCTION(2, "I2SO1_D12"),
+ MTK_FUNCTION(3, "LCM1_RST"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "I2SO2_BCK"),
+ MTK_FUNCTION(2, "I2SO1_D13"),
+ MTK_FUNCTION(3, "I2SIN1_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "I2SO2_WS"),
+ MTK_FUNCTION(2, "I2SO1_D14"),
+ MTK_FUNCTION(3, "I2SIN1_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "I2SO2_D0"),
+ MTK_FUNCTION(2, "I2SO1_D15"),
+ MTK_FUNCTION(3, "I2SI01_DATA0"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(2, "I2SO2_BCK"),
+ MTK_FUNCTION(3, "SCP_SPI2_CK"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "DMIC1_DAT"),
+ MTK_FUNCTION(2, "I2SO2_WS"),
+ MTK_FUNCTION(3, "SCP_SPI2_MI"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "DMIC2_CLK"),
+ MTK_FUNCTION(2, "VBUSVALID"),
+ MTK_FUNCTION(3, "SCP_SPI2_MO"),
+ MTK_FUNCTION(4, "SCP_SCL2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TDO"),
+ MTK_FUNCTION(6, "JTDO_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "DMIC2_DAT"),
+ MTK_FUNCTION(2, "VBUSVALID_1P"),
+ MTK_FUNCTION(3, "SCP_SPI2_CS"),
+ MTK_FUNCTION(4, "SCP_SDA2"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "PCM_DO"),
+ MTK_FUNCTION(2, "AUXIF_ST0"),
+ MTK_FUNCTION(3, "UCTS2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TMS"),
+ MTK_FUNCTION(6, "JTMS_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "PCM_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK0"),
+ MTK_FUNCTION(3, "URTS2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TCK"),
+ MTK_FUNCTION(6, "JTCK_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "PCM_DI"),
+ MTK_FUNCTION(2, "AUXIF_ST1"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(6, "JTRSTn_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "PCM_SYNC"),
+ MTK_FUNCTION(2, "AUXIF_CLK1"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(5, "SCP_JTAG1_TDI"),
+ MTK_FUNCTION(6, "JTDI_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "I2SIN2_BCK"),
+ MTK_FUNCTION(3, "PWM_0"),
+ MTK_FUNCTION(4, "WAKEN"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(2, "I2SIN2_LRCK"),
+ MTK_FUNCTION(3, "PWM_1"),
+ MTK_FUNCTION(4, "PERSTN"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "IDDIG_2P"),
+ MTK_FUNCTION(3, "PWM_2"),
+ MTK_FUNCTION(4, "CLKREQN"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "PERSTN_1"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "I2SI02_DATA0"),
+ MTK_FUNCTION(4, "CLKREQN_1"),
+ MTK_FUNCTION(5, "VOW_DAT_MISO"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "I2SI02_DATA1"),
+ MTK_FUNCTION(4, "WAKEN_1"),
+ MTK_FUNCTION(5, "VOW_CLK_MISO"),
+ MTK_FUNCTION(7, "DBG_MON_B32")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO2"),
+ MTK_FUNCTION(2, "I2SI02_DATA2")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "I2SI02_DATA3"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "DGI_D0"),
+ MTK_FUNCTION(2, "DPI_D0"),
+ MTK_FUNCTION(3, "I2SI4_MCK"),
+ MTK_FUNCTION(4, "SPIM4_CLK"),
+ MTK_FUNCTION(5, "GBE_TXD3"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "DGI_D1"),
+ MTK_FUNCTION(2, "DPI_D1"),
+ MTK_FUNCTION(3, "I2SI4_BCK"),
+ MTK_FUNCTION(4, "SPIM4_MO"),
+ MTK_FUNCTION(5, "GBE_TXD2"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "DGI_D2"),
+ MTK_FUNCTION(2, "DPI_D2"),
+ MTK_FUNCTION(3, "I2SI4_WS"),
+ MTK_FUNCTION(4, "SPIM4_CSB"),
+ MTK_FUNCTION(5, "GBE_TXD1"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "DGI_D3"),
+ MTK_FUNCTION(2, "DPI_D3"),
+ MTK_FUNCTION(3, "I2SI4_D0"),
+ MTK_FUNCTION(4, "SPIM4_MI"),
+ MTK_FUNCTION(5, "GBE_TXD0"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "DGI_D4"),
+ MTK_FUNCTION(2, "DPI_D4"),
+ MTK_FUNCTION(3, "I2SI5_MCK"),
+ MTK_FUNCTION(4, "SPIM5_CLK"),
+ MTK_FUNCTION(5, "GBE_RXD3"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "DGI_D5"),
+ MTK_FUNCTION(2, "DPI_D5"),
+ MTK_FUNCTION(3, "I2SI5_BCK"),
+ MTK_FUNCTION(4, "SPIM5_MO"),
+ MTK_FUNCTION(5, "GBE_RXD2"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "DGI_D6"),
+ MTK_FUNCTION(2, "DPI_D6"),
+ MTK_FUNCTION(3, "I2SI5_WS"),
+ MTK_FUNCTION(4, "SPIM5_CSB"),
+ MTK_FUNCTION(5, "GBE_RXD1"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "DGI_D7"),
+ MTK_FUNCTION(2, "DPI_D7"),
+ MTK_FUNCTION(3, "I2SI5_D0"),
+ MTK_FUNCTION(4, "SPIM5_MI"),
+ MTK_FUNCTION(5, "GBE_RXD0"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "DGI_D8"),
+ MTK_FUNCTION(2, "DPI_D8"),
+ MTK_FUNCTION(3, "I2SO4_MCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_CK"),
+ MTK_FUNCTION(5, "GBE_TXC"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "DGI_D9"),
+ MTK_FUNCTION(2, "DPI_D9"),
+ MTK_FUNCTION(3, "I2SO4_BCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_MI"),
+ MTK_FUNCTION(5, "GBE_RXC"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "DGI_D10"),
+ MTK_FUNCTION(2, "DPI_D10"),
+ MTK_FUNCTION(3, "I2SO4_WS"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_CS"),
+ MTK_FUNCTION(5, "GBE_RXDV"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "DGI_D11"),
+ MTK_FUNCTION(2, "DPI_D11"),
+ MTK_FUNCTION(3, "I2SO4_D0"),
+ MTK_FUNCTION(4, "SCP_SPI1_B_MO"),
+ MTK_FUNCTION(5, "GBE_TXEN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "DGI_D12"),
+ MTK_FUNCTION(2, "DPI_D12"),
+ MTK_FUNCTION(3, "MSDC2_CMD_A"),
+ MTK_FUNCTION(4, "I2SO5_BCK"),
+ MTK_FUNCTION(5, "GBE_MDC"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "DGI_D13"),
+ MTK_FUNCTION(2, "DPI_D13"),
+ MTK_FUNCTION(3, "MSDC2_CLK_A"),
+ MTK_FUNCTION(4, "I2SO5_WS"),
+ MTK_FUNCTION(5, "GBE_MDIO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "DGI_D14"),
+ MTK_FUNCTION(2, "DPI_D14"),
+ MTK_FUNCTION(3, "MSDC2_DAT3_A"),
+ MTK_FUNCTION(4, "I2SO5_D0"),
+ MTK_FUNCTION(5, "GBE_TXER"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "DGI_D15"),
+ MTK_FUNCTION(2, "DPI_D15"),
+ MTK_FUNCTION(3, "MSDC2_DAT0_A"),
+ MTK_FUNCTION(4, "I2SO2_D1"),
+ MTK_FUNCTION(5, "GBE_RXER"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "DGI_HSYNC"),
+ MTK_FUNCTION(2, "DPI_HSYNC"),
+ MTK_FUNCTION(3, "MSDC2_DAT2_A"),
+ MTK_FUNCTION(4, "I2SO2_D2"),
+ MTK_FUNCTION(5, "GBE_COL"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "DGI_VSYNC"),
+ MTK_FUNCTION(2, "DPI_VSYNC"),
+ MTK_FUNCTION(3, "MSDC2_DAT1_A"),
+ MTK_FUNCTION(4, "I2SO2_D3"),
+ MTK_FUNCTION(5, "GBE_INTR"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "DGI_DE"),
+ MTK_FUNCTION(2, "DPI_DE"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(5, "I2SIN_D1"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "DGI_CK"),
+ MTK_FUNCTION(2, "DPI_CK"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "I2SO5_MCK"),
+ MTK_FUNCTION(5, "I2SIN_D2"),
+ MTK_FUNCTION(6, "CCU0_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "DISP_PWM0"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "UTXD0")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "URXD0")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(2, "DSI_TE"),
+ MTK_FUNCTION(3, "I2SO1_D8"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "PWM_0"),
+ MTK_FUNCTION(6, "TP_URTS1_AO"),
+ MTK_FUNCTION(7, "I2SIN_D0")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "I2SO1_D9"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "PWM_1"),
+ MTK_FUNCTION(6, "TP_UCTS1_AO"),
+ MTK_FUNCTION(7, "I2SIN_D1")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "VBUSVALID_2P"),
+ MTK_FUNCTION(3, "I2SO1_D10"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "TP_UTXD1_AO"),
+ MTK_FUNCTION(6, "MD32_1_TXD"),
+ MTK_FUNCTION(7, "I2SIN_D2")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "VBUSVALID_3P"),
+ MTK_FUNCTION(3, "I2SO1_D11"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "TP_URXD1_AO"),
+ MTK_FUNCTION(6, "MD32_1_RXD"),
+ MTK_FUNCTION(7, "I2SIN_D3")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(2, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(3, "PWM_2")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "KPCOL0")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "SCP_SCL3"),
+ MTK_FUNCTION(5, "I2SIN_MCK")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(2, "KPCOL1"),
+ MTK_FUNCTION(4, "SCP_SDA3"),
+ MTK_FUNCTION(5, "I2SIN_BCK")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(2, "I2SIN_D3"),
+ MTK_FUNCTION(5, "I2SIN_WS")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "JTMS_SEL3"),
+ MTK_FUNCTION(3, "UDI_TMS"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TMS"),
+ MTK_FUNCTION(6, "IPU_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "JTCK_SEL3"),
+ MTK_FUNCTION(3, "UDI_TCK"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TCK"),
+ MTK_FUNCTION(6, "IPU_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "JTDI_SEL3"),
+ MTK_FUNCTION(3, "UDI_TDI"),
+ MTK_FUNCTION(4, "I2SO2_D0"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TDI"),
+ MTK_FUNCTION(6, "IPU_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "JTDO_SEL3"),
+ MTK_FUNCTION(3, "UDI_TDO"),
+ MTK_FUNCTION(4, "I2SO2_D1"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TDO"),
+ MTK_FUNCTION(6, "IPU_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "JTRSTn_SEL3"),
+ MTK_FUNCTION(3, "UDI_NTRST"),
+ MTK_FUNCTION(4, "I2SO2_D2"),
+ MTK_FUNCTION(5, "CCU1_JTAG_TRST"),
+ MTK_FUNCTION(6, "IPU_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(4, "I2SO2_D3"),
+ MTK_FUNCTION(6, "MD32_1_GPIO2")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "MSDC0_DAT7")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "MSDC0_DAT6")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "MSDC0_DAT5")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "MSDC0_DAT4")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "MSDC0_CLK")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "MSDC0_DAT3")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "MSDC0_DAT2")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "MSDC0_DAT1")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "MSDC0_DAT0")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MSDC0_DSL")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "UCTS2"),
+ MTK_FUNCTION(3, "UTXD5"),
+ MTK_FUNCTION(4, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(5, "mbistreaden_trigger"),
+ MTK_FUNCTION(6, "MD32_1_GPIO0"),
+ MTK_FUNCTION(7, "SCP_SCL2")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "USB_DRVVBUS"),
+ MTK_FUNCTION(2, "URTS2"),
+ MTK_FUNCTION(3, "URXD5"),
+ MTK_FUNCTION(4, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(5, "mbistwriteen_trigger"),
+ MTK_FUNCTION(6, "MD32_1_GPIO1"),
+ MTK_FUNCTION(7, "SCP_SDA2")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "IDDIG_1P"),
+ MTK_FUNCTION(2, "SPINOR_IO2"),
+ MTK_FUNCTION(3, "SNFI_WP"),
+ MTK_FUNCTION(4, "VPU_UDI_NTRST")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(2, "SPINOR_IO3"),
+ MTK_FUNCTION(3, "SNFI_HOLD"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TRST"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "APU_JTAG_TRST")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "SPIM0_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(3, "SPIS0_CSB"),
+ MTK_FUNCTION(4, "VPU_UDI_TMS"),
+ MTK_FUNCTION(6, "I2SO5_D0")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "SPIM0_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SPIS0_CLK"),
+ MTK_FUNCTION(4, "VPU_UDI_TCK"),
+ MTK_FUNCTION(6, "I2SO5_BCK")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "SPIM0_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SPIS0_SI"),
+ MTK_FUNCTION(4, "VPU_UDI_TDO"),
+ MTK_FUNCTION(6, "I2SO5_WS")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "SPIM0_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(3, "SPIS0_SO"),
+ MTK_FUNCTION(4, "VPU_UDI_TDI"),
+ MTK_FUNCTION(6, "I2SO5_MCK")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "SPIM1_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "SPIS1_CSB"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TMS"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "APU_JTAG_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "SPIM1_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "SPIS1_CLK"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TCK"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "APU_JTAG_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "SPIM1_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "SPIS1_SI"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TDO"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "APU_JTAG_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "SPIM1_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "SPIS1_SO"),
+ MTK_FUNCTION(4, "MD32_1_JTAG_TDI"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "APU_JTAG_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "SPIM2_CSB"),
+ MTK_FUNCTION(2, "SPINOR_CS"),
+ MTK_FUNCTION(3, "SNFI_CS"),
+ MTK_FUNCTION(4, "DMIC3_DAT"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "SPIM2_CLK"),
+ MTK_FUNCTION(2, "SPINOR_CK"),
+ MTK_FUNCTION(3, "SNFI_CLK"),
+ MTK_FUNCTION(4, "DMIC3_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "SPIM2_MO"),
+ MTK_FUNCTION(2, "SPINOR_IO0"),
+ MTK_FUNCTION(3, "SNFI_MOSI"),
+ MTK_FUNCTION(4, "DMIC4_DAT"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "SPIM2_MI"),
+ MTK_FUNCTION(2, "SPINOR_IO1"),
+ MTK_FUNCTION(3, "SNFI_MISO"),
+ MTK_FUNCTION(4, "DMIC4_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 221),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 224),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ )
+};
+
+#endif /* __PINCTRL_MTK_MT8195_H */
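Each MTK_PIN() entry above binds a pin number to its EINT mapping, a drive-strength group, and up to eight mux modes, where mode 0 is always the plain GPIO function and gaps are modes the pad does not implement. A minimal standalone sketch of that lookup follows; the struct and function names here (pin_func, pin_desc, lookup) are local simplifications, not the kernel's actual pinctrl structures.

/*
 * Simplified model of the table above: find the function name bound to
 * a given (pin, mode) pair. Uses pin 8 ("GPIO8"/"SDA0"/...) as sample data.
 */
#include <stdio.h>
#include <stddef.h>

struct pin_func { int mode; const char *name; };
struct pin_desc {
	int number;
	const char *gpio_name;
	const struct pin_func *funcs;
	size_t nfuncs;
};

static const struct pin_func gpio8_funcs[] = {
	{ 0, "GPIO8" }, { 1, "SDA0" }, { 2, "PWM_0" },
	{ 4, "SPDIF_OUT" }, { 6, "LVTS_FOUT" }, { 7, "DBG_MON_A0" },
};

static const struct pin_desc pins[] = {
	{ 8, "GPIO8", gpio8_funcs, sizeof(gpio8_funcs) / sizeof(gpio8_funcs[0]) },
};

static const char *lookup(int pin, int mode)
{
	for (size_t i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
		if (pins[i].number != pin)
			continue;
		for (size_t j = 0; j < pins[i].nfuncs; j++)
			if (pins[i].funcs[j].mode == mode)
				return pins[i].funcs[j].name;
	}
	return NULL;	/* mode not wired on this pin, e.g. mode 3 on pin 8 */
}

int main(void)
{
	printf("pin 8, mode 1 -> %s\n", lookup(8, 1));	/* SDA0 */
	return 0;
}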
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index da1f19288aa6..85db2e4377f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -970,6 +970,8 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
hw->nbase = hw->soc->nbase_names;
+ spin_lock_init(&hw->lock);
+
err = mtk_pctrl_build_state(pdev);
if (err) {
dev_err(&pdev->dev, "build state failed: %d\n", err);
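The hunk above initializes hw->lock before mtk_pctrl_build_state() and the rest of probe, so the lock is valid before any code path that might take it, presumably the driver's register read-modify-write helpers. A userspace analogue of that init-before-use ordering, with a pthread mutex standing in for the spinlock (an assumed simplification, not the driver's code):

/*
 * A lock protecting read-modify-write accesses must be initialized before
 * the object is handed to anything that may use it.
 */
#include <pthread.h>
#include <stdio.h>

struct ctrl {
	pthread_mutex_t lock;
	unsigned int reg;
};

/* Read-modify-write helper, analogous in spirit only. */
static void ctrl_rmw(struct ctrl *c, unsigned int mask, unsigned int set)
{
	pthread_mutex_lock(&c->lock);
	c->reg = (c->reg & ~mask) | set;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctrl c = { .reg = 0 };

	pthread_mutex_init(&c.lock, NULL);	/* init first... */
	ctrl_rmw(&c, 0xf, 0x5);			/* ...then use */
	printf("reg = %#x\n", c.reg);
	pthread_mutex_destroy(&c.lock);
	return 0;
}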
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
index 17491b27e487..8ba8f3e9121f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
@@ -519,13 +519,13 @@ static struct mvebu_mpp_mode armada_cp110_mpp_modes[] = {
MPP_FUNCTION(4, "synce1", "clk"),
MPP_FUNCTION(8, "led", "data"),
MPP_FUNCTION(10, "sdio", "hw_rst"),
- MPP_FUNCTION(11, "sdio", "wr_protect")),
+ MPP_FUNCTION(11, "sdio_wp", "wr_protect")),
MPP_MODE(55,
MPP_FUNCTION(0, "gpio", NULL),
MPP_FUNCTION(1, "ge1", "rxctl_rxdv"),
MPP_FUNCTION(3, "ptp", "pulse"),
MPP_FUNCTION(10, "sdio", "led"),
- MPP_FUNCTION(11, "sdio", "card_detect")),
+ MPP_FUNCTION(11, "sdio_cd", "card_detect")),
MPP_MODE(56,
MPP_FUNCTION(0, "gpio", NULL),
MPP_FUNCTION(4, "tdm", "drx"),
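The rename above gives the later sdio settings unique function names (sdio_wp, sdio_cd). With duplicates, a lookup by function name can only ever reach the first matching entry, leaving the later selector unreachable. A hedged sketch of that failure mode, using hypothetical names (mpp_func, mpp_sel_by_name) rather than the mvebu driver's API:

#include <stdio.h>
#include <string.h>

struct mpp_func { int sel; const char *name; const char *subname; };

static const struct mpp_func mpp54[] = {
	{ 10, "sdio",    "hw_rst" },
	{ 11, "sdio_wp", "wr_protect" },	/* was also named "sdio" */
};

/* First match by name wins - duplicates shadow every later entry. */
static int mpp_sel_by_name(const struct mpp_func *f, size_t n, const char *name)
{
	for (size_t i = 0; i < n; i++)
		if (!strcmp(f[i].name, name))
			return f[i].sel;
	return -1;
}

int main(void)
{
	printf("sdio    -> sel %d\n", mpp_sel_by_name(mpp54, 2, "sdio"));
	printf("sdio_wp -> sel %d\n", mpp_sel_by_name(mpp54, 2, "sdio_wp"));
	return 0;
}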
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 1e225d513988..22e8d4c4040e 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -43,7 +43,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
- PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
+ PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
@@ -174,8 +174,8 @@ static const struct pinconf_generic_params dt_params[] = {
{ "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
{ "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
- { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
- { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
+ { "low-power-disable", PIN_CONFIG_MODE_LOW_POWER, 0 },
+ { "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 },
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 02c075cc010b..d9d54065472e 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -370,9 +370,9 @@ DEFINE_SHOW_ATTRIBUTE(pinconf_groups);
void pinconf_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
- debugfs_create_file("pinconf-pins", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinconf-pins", 0444,
devroot, pctldev, &pinconf_pins_fops);
- debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinconf-groups", 0444,
devroot, pctldev, &pinconf_groups_fops);
}
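The mode change above is purely cosmetic: 0444 is exactly S_IRUSR | S_IRGRP | S_IROTH (what the kernel spells S_IRUGO), and debugfs_create_file() creates regular files anyway, so the S_IFREG bit added nothing. A quick userspace check of the equivalence:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* Prints 0444: read-only for owner, group and others. */
	printf("S_IRUSR|S_IRGRP|S_IROTH = %04o\n",
	       (unsigned int)(S_IRUSR | S_IRGRP | S_IROTH));
	return 0;
}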
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index e71ebccc479c..03c32b2c5d30 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
conf = atmel_pin_config_read(pctldev, pin_id);
+ /* Keep slew rate enabled by default. */
+ if (atmel_pioctrl->slew_rate_support)
+ conf |= ATMEL_PIO_SR_MASK;
+
for (i = 0; i < num_configs; i++) {
unsigned int param = pinconf_to_config_param(configs[i]);
unsigned int arg = pinconf_to_config_argument(configs[i]);
@@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
__func__, pin_id, configs[i]);
- /* Keep slew rate enabled by default. */
- if (atmel_pioctrl->slew_rate_support)
- conf |= ATMEL_PIO_SR_MASK;
-
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
conf &= (~ATMEL_PIO_PUEN_MASK);
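Hoisting the slew-rate default out of the per-config loop matters because, applied inside the loop, the ATMEL_PIO_SR_MASK bits were re-asserted on every iteration, so a config entry that cleared them could be silently undone by the next entry in the same call. A simplified sketch of the fixed shape, with a hypothetical SR_MASK standing in for the real register field:

#include <stdio.h>

#define SR_MASK 0x1u

/* Apply the default once, before the loop, so a later entry can clear it. */
static unsigned int apply(unsigned int conf, const unsigned int *clear_sr, int n)
{
	conf |= SR_MASK;			/* default: slew rate on */

	for (int i = 0; i < n; i++)
		if (clear_sr[i])
			conf &= ~SR_MASK;	/* explicit request wins */
	return conf;
}

int main(void)
{
	const unsigned int cfgs[] = { 1 };	/* one "disable slew rate" entry */

	printf("conf = %#x\n", apply(0, cfgs, 1));	/* SR bit stays cleared */
	return 0;
}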
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 067271b7d35a..a194d8089b6f 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -628,7 +628,8 @@ static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
break;
default:
- return -EINVAL;
+ of_node_put(np);
+ return -EINVAL;
}
i++;
}
@@ -707,34 +708,42 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
group.num_pins = of_property_count_u32_elems(np, "pins");
if (group.num_pins < 0) {
dev_err(dev, "No pins in the group: %s\n", prop->name);
+ of_node_put(np);
return -EINVAL;
}
group.name = prop->value;
group.pins = devm_kcalloc(dev, group.num_pins,
sizeof(*(group.pins)), GFP_KERNEL);
- if (!group.pins)
+ if (!group.pins) {
+ of_node_put(np);
return -ENOMEM;
+ }
pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
GFP_KERNEL);
- if (!pinmux)
+ if (!pinmux) {
+ of_node_put(np);
return -ENOMEM;
+ }
for (j = 0; j < group.num_pins; j++) {
if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
dev_err(dev, "Group %s: Read intel pins id failed\n",
group.name);
+ of_node_put(np);
return -EINVAL;
}
if (pin_id >= drvdata->pctl_desc.npins) {
dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
group.name, j, pin_id);
+ of_node_put(np);
return -EINVAL;
}
group.pins[j] = pin_id;
if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
dev_err(dev, "Group %s: Read intel pinmux id failed\n",
group.name);
+ of_node_put(np);
return -EINVAL;
}
pinmux[j] = pinmux_id;
@@ -745,6 +754,7 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
pinmux);
if (err < 0) {
dev_err(dev, "Failed to register group %s\n", group.name);
+ of_node_put(np);
return err;
}
memset(&group, 0, sizeof(group));
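for_each_child_of_node() holds a reference on the child it hands to each iteration, so every early return inside the loop body must drop it, which is what each added of_node_put(np) call does. A userspace analogue of that iterator contract (an assumed model with a plain counter, not the OF API):

#include <stdio.h>

struct node { int refs; int bad; };

static void node_put(struct node *n) { n->refs--; }

static int scan(struct node *nodes, int n)
{
	for (int i = 0; i < n; i++) {
		struct node *np = &nodes[i];

		np->refs++;			/* iterator takes a reference */
		if (np->bad) {
			node_put(np);		/* drop it on the error path */
			return -1;
		}
		node_put(np);			/* normal end-of-iteration drop */
	}
	return 0;
}

int main(void)
{
	struct node nodes[2] = { { 0, 0 }, { 0, 1 } };

	scan(nodes, 2);
	printf("refs: %d %d\n", nodes[0].refs, nodes[1].refs);	/* 0 0 */
	return 0;
}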
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index f2746125b077..651a36b9dcc0 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,8 +3,8 @@
* Ingenic SoCs pinctrl driver
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
* Copyright (c) 2017, 2019 Paul Boddie <paul@boddie.org.uk>
+ * Copyright (c) 2019, 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/compiler.h>
@@ -26,47 +26,83 @@
#include "pinconf.h"
#include "pinmux.h"
-#define GPIO_PIN 0x00
-#define GPIO_MSK 0x20
+#define GPIO_PIN 0x00
+#define GPIO_MSK 0x20
+
+#define JZ4730_GPIO_DATA 0x00
+#define JZ4730_GPIO_GPDIR 0x04
+#define JZ4730_GPIO_GPPUR 0x0c
+#define JZ4730_GPIO_GPALR 0x10
+#define JZ4730_GPIO_GPAUR 0x14
+#define JZ4730_GPIO_GPIDLR 0x18
+#define JZ4730_GPIO_GPIDUR 0x1c
+#define JZ4730_GPIO_GPIER 0x20
+#define JZ4730_GPIO_GPIMR 0x24
+#define JZ4730_GPIO_GPFR 0x28
+
+#define JZ4740_GPIO_DATA 0x10
+#define JZ4740_GPIO_PULL_DIS 0x30
+#define JZ4740_GPIO_FUNC 0x40
+#define JZ4740_GPIO_SELECT 0x50
+#define JZ4740_GPIO_DIR 0x60
+#define JZ4740_GPIO_TRIG 0x70
+#define JZ4740_GPIO_FLAG 0x80
+
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
+
+#define X1830_GPIO_PEL 0x110
+#define X1830_GPIO_PEH 0x120
+#define X1830_GPIO_SR 0x150
+#define X1830_GPIO_SMT 0x160
+
+#define X2000_GPIO_EDG 0x70
+#define X2000_GPIO_PEPU 0x80
+#define X2000_GPIO_PEPD 0x90
+#define X2000_GPIO_SR 0xd0
+#define X2000_GPIO_SMT 0xe0
+
+#define REG_SET(x) ((x) + 0x4)
+#define REG_CLEAR(x) ((x) + 0x8)
+
+#define REG_PZ_BASE(x) ((x) * 7)
+#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
+
+#define GPIO_PULL_DIS 0
+#define GPIO_PULL_UP 1
+#define GPIO_PULL_DOWN 2
+
+#define PINS_PER_GPIO_CHIP 32
+#define JZ4730_PINS_PER_PAIRED_REG 16
-#define JZ4740_GPIO_DATA 0x10
-#define JZ4740_GPIO_PULL_DIS 0x30
-#define JZ4740_GPIO_FUNC 0x40
-#define JZ4740_GPIO_SELECT 0x50
-#define JZ4740_GPIO_DIR 0x60
-#define JZ4740_GPIO_TRIG 0x70
-#define JZ4740_GPIO_FLAG 0x80
-
-#define JZ4770_GPIO_INT 0x10
-#define JZ4770_GPIO_PAT1 0x30
-#define JZ4770_GPIO_PAT0 0x40
-#define JZ4770_GPIO_FLAG 0x50
-#define JZ4770_GPIO_PEN 0x70
-
-#define X1830_GPIO_PEL 0x110
-#define X1830_GPIO_PEH 0x120
-
-#define REG_SET(x) ((x) + 0x4)
-#define REG_CLEAR(x) ((x) + 0x8)
-
-#define REG_PZ_BASE(x) ((x) * 7)
-#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
-
-#define GPIO_PULL_DIS 0
-#define GPIO_PULL_UP 1
-#define GPIO_PULL_DOWN 2
+#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs) \
+ { \
+ name, \
+ id##_pins, \
+ ARRAY_SIZE(id##_pins), \
+ funcs, \
+ }
-#define PINS_PER_GPIO_CHIP 32
+#define INGENIC_PIN_GROUP(name, id, func) \
+ INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
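The relocated INGENIC_PIN_GROUP() macro builds a whole group descriptor from one identifier stem: id##_pins supplies the pin array, a sizeof ratio over the same paste supplies the count, and the function number rides along as an opaque pointer. A standalone illustration of that token pasting; struct group_desc here is a local simplification of the kernel's:

#include <stdio.h>

struct group_desc {
	const char *name;
	const int *pins;
	int num_pins;
	void *data;	/* function number smuggled through a pointer */
};

#define PIN_GROUP_FUNCS(name, id, funcs) \
	{ name, id##_pins, (int)(sizeof(id##_pins) / sizeof(id##_pins[0])), funcs, }
#define PIN_GROUP(name, id, func) \
	PIN_GROUP_FUNCS(name, id, (void *)(func))

static const int demo_uart0_data_pins[] = { 0x7e, 0x7f };

static const struct group_desc groups[] = {
	PIN_GROUP("uart0-data", demo_uart0_data, 1),
};

int main(void)
{
	printf("%s: %d pins, func %ld\n", groups[0].name,
	       groups[0].num_pins, (long)groups[0].data);
	return 0;
}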
enum jz_version {
+ ID_JZ4730,
ID_JZ4740,
ID_JZ4725B,
+ ID_JZ4750,
+ ID_JZ4755,
ID_JZ4760,
ID_JZ4770,
+ ID_JZ4775,
ID_JZ4780,
ID_X1000,
ID_X1500,
ID_X1830,
+ ID_X2000,
};
struct ingenic_chip_info {
@@ -99,6 +135,99 @@ struct ingenic_gpio_chip {
unsigned int irq, reg_base;
};
+static const u32 jz4730_pull_ups[4] = {
+ 0x3fa3320f, 0xf200ffff, 0xffffffff, 0xffffffff,
+};
+
+static const u32 jz4730_pull_downs[4] = {
+ 0x00000df0, 0x0dff0000, 0x00000000, 0x00000000,
+};
+
+static int jz4730_mmc_1bit_pins[] = { 0x27, 0x26, 0x22, };
+static int jz4730_mmc_4bit_pins[] = { 0x23, 0x24, 0x25, };
+static int jz4730_uart0_data_pins[] = { 0x7e, 0x7f, };
+static int jz4730_uart1_data_pins[] = { 0x18, 0x19, };
+static int jz4730_uart2_data_pins[] = { 0x6f, 0x7d, };
+static int jz4730_uart3_data_pins[] = { 0x10, 0x15, };
+static int jz4730_uart3_hwflow_pins[] = { 0x11, 0x17, };
+static int jz4730_lcd_8bit_pins[] = {
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x3a, 0x39, 0x38,
+};
+static int jz4730_lcd_16bit_pins[] = {
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+};
+static int jz4730_lcd_special_pins[] = { 0x3d, 0x3c, 0x3e, 0x3f, };
+static int jz4730_lcd_generic_pins[] = { 0x3b, };
+static int jz4730_nand_cs1_pins[] = { 0x53, };
+static int jz4730_nand_cs2_pins[] = { 0x54, };
+static int jz4730_nand_cs3_pins[] = { 0x55, };
+static int jz4730_nand_cs4_pins[] = { 0x56, };
+static int jz4730_nand_cs5_pins[] = { 0x57, };
+static int jz4730_pwm_pwm0_pins[] = { 0x5e, };
+static int jz4730_pwm_pwm1_pins[] = { 0x5f, };
+
+static u8 jz4730_lcd_8bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, };
+
+static const struct group_desc jz4730_groups[] = {
+ INGENIC_PIN_GROUP("mmc-1bit", jz4730_mmc_1bit, 1),
+ INGENIC_PIN_GROUP("mmc-4bit", jz4730_mmc_4bit, 1),
+ INGENIC_PIN_GROUP("uart0-data", jz4730_uart0_data, 1),
+ INGENIC_PIN_GROUP("uart1-data", jz4730_uart1_data, 1),
+ INGENIC_PIN_GROUP("uart2-data", jz4730_uart2_data, 1),
+ INGENIC_PIN_GROUP("uart3-data", jz4730_uart3_data, 1),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4730_uart3_hwflow, 1),
+ INGENIC_PIN_GROUP_FUNCS("lcd-8bit", jz4730_lcd_8bit, jz4730_lcd_8bit_funcs),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4730_lcd_16bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4730_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4730_lcd_generic, 1),
+ INGENIC_PIN_GROUP("nand-cs1", jz4730_nand_cs1, 1),
+ INGENIC_PIN_GROUP("nand-cs2", jz4730_nand_cs2, 1),
+ INGENIC_PIN_GROUP("nand-cs3", jz4730_nand_cs3, 1),
+ INGENIC_PIN_GROUP("nand-cs4", jz4730_nand_cs4, 1),
+ INGENIC_PIN_GROUP("nand-cs5", jz4730_nand_cs5, 1),
+ INGENIC_PIN_GROUP("pwm0", jz4730_pwm_pwm0, 1),
+ INGENIC_PIN_GROUP("pwm1", jz4730_pwm_pwm1, 1),
+};
+
+static const char *jz4730_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
+static const char *jz4730_uart0_groups[] = { "uart0-data", };
+static const char *jz4730_uart1_groups[] = { "uart1-data", };
+static const char *jz4730_uart2_groups[] = { "uart2-data", };
+static const char *jz4730_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4730_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-special", "lcd-generic",
+};
+static const char *jz4730_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-cs5",
+};
+static const char *jz4730_pwm0_groups[] = { "pwm0", };
+static const char *jz4730_pwm1_groups[] = { "pwm1", };
+
+static const struct function_desc jz4730_functions[] = {
+ { "mmc", jz4730_mmc_groups, ARRAY_SIZE(jz4730_mmc_groups), },
+ { "uart0", jz4730_uart0_groups, ARRAY_SIZE(jz4730_uart0_groups), },
+ { "uart1", jz4730_uart1_groups, ARRAY_SIZE(jz4730_uart1_groups), },
+ { "uart2", jz4730_uart2_groups, ARRAY_SIZE(jz4730_uart2_groups), },
+ { "uart3", jz4730_uart3_groups, ARRAY_SIZE(jz4730_uart3_groups), },
+ { "lcd", jz4730_lcd_groups, ARRAY_SIZE(jz4730_lcd_groups), },
+ { "nand", jz4730_nand_groups, ARRAY_SIZE(jz4730_nand_groups), },
+ { "pwm0", jz4730_pwm0_groups, ARRAY_SIZE(jz4730_pwm0_groups), },
+ { "pwm1", jz4730_pwm1_groups, ARRAY_SIZE(jz4730_pwm1_groups), },
+};
+
+static const struct ingenic_chip_info jz4730_chip_info = {
+ .num_chips = 4,
+ .reg_offset = 0x30,
+ .version = ID_JZ4730,
+ .groups = jz4730_groups,
+ .num_groups = ARRAY_SIZE(jz4730_groups),
+ .functions = jz4730_functions,
+ .num_functions = ARRAY_SIZE(jz4730_functions),
+ .pull_ups = jz4730_pull_ups,
+ .pull_downs = jz4730_pull_downs,
+};
+
static const u32 jz4740_pull_ups[4] = {
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
@@ -113,13 +242,15 @@ static int jz4740_uart0_data_pins[] = { 0x7a, 0x79, };
static int jz4740_uart0_hwflow_pins[] = { 0x7e, 0x7f, };
static int jz4740_uart1_data_pins[] = { 0x7e, 0x7f, };
static int jz4740_lcd_8bit_pins[] = {
- 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x52, 0x53, 0x54,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x52, 0x53, 0x54,
};
static int jz4740_lcd_16bit_pins[] = {
- 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x55,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
};
static int jz4740_lcd_18bit_pins[] = { 0x50, 0x51, };
-static int jz4740_lcd_18bit_tft_pins[] = { 0x56, 0x57, 0x31, 0x32, };
+static int jz4740_lcd_special_pins[] = { 0x31, 0x32, 0x56, 0x57, };
+static int jz4740_lcd_generic_pins[] = { 0x55, };
static int jz4740_nand_cs1_pins[] = { 0x39, };
static int jz4740_nand_cs2_pins[] = { 0x3a, };
static int jz4740_nand_cs3_pins[] = { 0x3b, };
@@ -134,18 +265,6 @@ static int jz4740_pwm_pwm5_pins[] = { 0x7c, };
static int jz4740_pwm_pwm6_pins[] = { 0x7e, };
static int jz4740_pwm_pwm7_pins[] = { 0x7f, };
-
-#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs) \
- { \
- name, \
- id##_pins, \
- ARRAY_SIZE(id##_pins), \
- funcs, \
- }
-
-#define INGENIC_PIN_GROUP(name, id, func) \
- INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
-
static const struct group_desc jz4740_groups[] = {
INGENIC_PIN_GROUP("mmc-1bit", jz4740_mmc_1bit, 0),
INGENIC_PIN_GROUP("mmc-4bit", jz4740_mmc_4bit, 0),
@@ -155,8 +274,8 @@ static const struct group_desc jz4740_groups[] = {
INGENIC_PIN_GROUP("lcd-8bit", jz4740_lcd_8bit, 0),
INGENIC_PIN_GROUP("lcd-16bit", jz4740_lcd_16bit, 0),
INGENIC_PIN_GROUP("lcd-18bit", jz4740_lcd_18bit, 0),
- INGENIC_PIN_GROUP("lcd-18bit-tft", jz4740_lcd_18bit_tft, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-special", jz4740_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4740_lcd_generic, 0),
INGENIC_PIN_GROUP("nand-cs1", jz4740_nand_cs1, 0),
INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2, 0),
INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3, 0),
@@ -176,7 +295,7 @@ static const char *jz4740_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
static const char *jz4740_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *jz4740_uart1_groups[] = { "uart1-data", };
static const char *jz4740_lcd_groups[] = {
- "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-18bit-tft", "lcd-no-pins",
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-special", "lcd-generic",
};
static const char *jz4740_nand_groups[] = {
"nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
@@ -223,6 +342,17 @@ static int jz4725b_mmc0_4bit_pins[] = { 0x5d, 0x5b, 0x56, };
static int jz4725b_mmc1_1bit_pins[] = { 0x7a, 0x7b, 0x7c, };
static int jz4725b_mmc1_4bit_pins[] = { 0x7d, 0x7e, 0x7f, };
static int jz4725b_uart_data_pins[] = { 0x4c, 0x4d, };
+static int jz4725b_lcd_8bit_pins[] = {
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x72, 0x73, 0x74,
+};
+static int jz4725b_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4725b_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4725b_lcd_generic_pins[] = { 0x75, };
static int jz4725b_nand_cs1_pins[] = { 0x55, };
static int jz4725b_nand_cs2_pins[] = { 0x56, };
static int jz4725b_nand_cs3_pins[] = { 0x57, };
@@ -235,19 +365,6 @@ static int jz4725b_pwm_pwm2_pins[] = { 0x4c, };
static int jz4725b_pwm_pwm3_pins[] = { 0x4d, };
static int jz4725b_pwm_pwm4_pins[] = { 0x4e, };
static int jz4725b_pwm_pwm5_pins[] = { 0x4f, };
-static int jz4725b_lcd_8bit_pins[] = {
- 0x72, 0x73, 0x74,
- 0x60, 0x61, 0x62, 0x63,
- 0x64, 0x65, 0x66, 0x67,
-};
-static int jz4725b_lcd_16bit_pins[] = {
- 0x68, 0x69, 0x6a, 0x6b,
- 0x6c, 0x6d, 0x6e, 0x6f,
-};
-static int jz4725b_lcd_18bit_pins[] = { 0x70, 0x71, };
-static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, };
-static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
-static int jz4725b_lcd_generic_pins[] = { 0x75, };
static u8 jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, };
@@ -258,6 +375,12 @@ static const struct group_desc jz4725b_groups[] = {
INGENIC_PIN_GROUP("mmc1-1bit", jz4725b_mmc1_1bit, 0),
INGENIC_PIN_GROUP("mmc1-4bit", jz4725b_mmc1_4bit, 0),
INGENIC_PIN_GROUP("uart-data", jz4725b_uart_data, 1),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic, 0),
INGENIC_PIN_GROUP("nand-cs1", jz4725b_nand_cs1, 0),
INGENIC_PIN_GROUP("nand-cs2", jz4725b_nand_cs2, 0),
INGENIC_PIN_GROUP("nand-cs3", jz4725b_nand_cs3, 0),
@@ -270,17 +393,15 @@ static const struct group_desc jz4725b_groups[] = {
INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3, 0),
INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4, 0),
INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5, 0),
- INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit, 0),
- INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit, 0),
- INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit, 0),
- INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit, 1),
- INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special, 0),
- INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic, 0),
};
static const char *jz4725b_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
static const char *jz4725b_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
static const char *jz4725b_uart_groups[] = { "uart-data", };
+static const char *jz4725b_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
static const char *jz4725b_nand_groups[] = {
"nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4",
"nand-cle-ale", "nand-fre-fwe",
@@ -291,10 +412,6 @@ static const char *jz4725b_pwm2_groups[] = { "pwm2", };
static const char *jz4725b_pwm3_groups[] = { "pwm3", };
static const char *jz4725b_pwm4_groups[] = { "pwm4", };
static const char *jz4725b_pwm5_groups[] = { "pwm5", };
-static const char *jz4725b_lcd_groups[] = {
- "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
- "lcd-special", "lcd-generic",
-};
static const struct function_desc jz4725b_functions[] = {
{ "mmc0", jz4725b_mmc0_groups, ARRAY_SIZE(jz4725b_mmc0_groups), },
@@ -322,6 +439,275 @@ static const struct ingenic_chip_info jz4725b_chip_info = {
.pull_downs = jz4740_pull_downs,
};
+static const u32 jz4750_pull_ups[6] = {
+ 0xffffffff, 0xffffffff, 0x3fffffff, 0x7fffffff, 0x1fff3fff, 0x00ffffff,
+};
+
+static const u32 jz4750_pull_downs[6] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static int jz4750_uart0_data_pins[] = { 0xa4, 0xa5, };
+static int jz4750_uart0_hwflow_pins[] = { 0xa6, 0xa7, };
+static int jz4750_uart1_data_pins[] = { 0x90, 0x91, };
+static int jz4750_uart1_hwflow_pins[] = { 0x92, 0x93, };
+static int jz4750_uart2_data_pins[] = { 0x9b, 0x9a, };
+static int jz4750_uart3_data_pins[] = { 0xb0, 0xb1, };
+static int jz4750_uart3_hwflow_pins[] = { 0xb2, 0xb3, };
+static int jz4750_mmc0_1bit_pins[] = { 0xa8, 0xa9, 0xa0, };
+static int jz4750_mmc0_4bit_pins[] = { 0xa1, 0xa2, 0xa3, };
+static int jz4750_mmc0_8bit_pins[] = { 0xa4, 0xa5, 0xa6, 0xa7, };
+static int jz4750_mmc1_1bit_pins[] = { 0xae, 0xaf, 0xaa, };
+static int jz4750_mmc1_4bit_pins[] = { 0xab, 0xac, 0xad, };
+static int jz4750_i2c_pins[] = { 0x8c, 0x8d, };
+static int jz4750_cim_pins[] = {
+ 0x89, 0x8b, 0x8a, 0x88,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+};
+static int jz4750_lcd_8bit_pins[] = {
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x72, 0x73, 0x74,
+};
+static int jz4750_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4750_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4750_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, 0xb2, 0xb3, };
+static int jz4750_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4750_lcd_generic_pins[] = { 0x75, };
+static int jz4750_nand_cs1_pins[] = { 0x55, };
+static int jz4750_nand_cs2_pins[] = { 0x56, };
+static int jz4750_nand_cs3_pins[] = { 0x57, };
+static int jz4750_nand_cs4_pins[] = { 0x58, };
+static int jz4750_nand_fre_fwe_pins[] = { 0x5c, 0x5d, };
+static int jz4750_pwm_pwm0_pins[] = { 0x94, };
+static int jz4750_pwm_pwm1_pins[] = { 0x95, };
+static int jz4750_pwm_pwm2_pins[] = { 0x96, };
+static int jz4750_pwm_pwm3_pins[] = { 0x97, };
+static int jz4750_pwm_pwm4_pins[] = { 0x98, };
+static int jz4750_pwm_pwm5_pins[] = { 0x99, };
+
+static const struct group_desc jz4750_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4750_uart0_data, 1),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4750_uart0_hwflow, 1),
+ INGENIC_PIN_GROUP("uart1-data", jz4750_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4750_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4750_uart2_data, 1),
+ INGENIC_PIN_GROUP("uart3-data", jz4750_uart3_data, 0),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4750_uart3_hwflow, 0),
+ INGENIC_PIN_GROUP("mmc0-1bit", jz4750_mmc0_1bit, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit", jz4750_mmc0_4bit, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit", jz4750_mmc0_8bit, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit", jz4750_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", jz4750_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("i2c-data", jz4750_i2c, 0),
+ INGENIC_PIN_GROUP("cim-data", jz4750_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4750_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4750_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4750_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4750_lcd_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-special", jz4750_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4750_lcd_generic, 0),
+ INGENIC_PIN_GROUP("nand-cs1", jz4750_nand_cs1, 0),
+ INGENIC_PIN_GROUP("nand-cs2", jz4750_nand_cs2, 0),
+ INGENIC_PIN_GROUP("nand-cs3", jz4750_nand_cs3, 0),
+ INGENIC_PIN_GROUP("nand-cs4", jz4750_nand_cs4, 0),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4750_nand_fre_fwe, 0),
+ INGENIC_PIN_GROUP("pwm0", jz4750_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4750_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4750_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4750_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4750_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4750_pwm_pwm5, 0),
+};
+
+static const char *jz4750_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4750_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *jz4750_uart2_groups[] = { "uart2-data", };
+static const char *jz4750_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4750_mmc0_groups[] = {
+ "mmc0-1bit", "mmc0-4bit", "mmc0-8bit",
+};
+static const char *jz4750_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *jz4750_i2c_groups[] = { "i2c-data", };
+static const char *jz4750_cim_groups[] = { "cim-data", };
+static const char *jz4750_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
+static const char *jz4750_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
+};
+static const char *jz4750_pwm0_groups[] = { "pwm0", };
+static const char *jz4750_pwm1_groups[] = { "pwm1", };
+static const char *jz4750_pwm2_groups[] = { "pwm2", };
+static const char *jz4750_pwm3_groups[] = { "pwm3", };
+static const char *jz4750_pwm4_groups[] = { "pwm4", };
+static const char *jz4750_pwm5_groups[] = { "pwm5", };
+
+static const struct function_desc jz4750_functions[] = {
+ { "uart0", jz4750_uart0_groups, ARRAY_SIZE(jz4750_uart0_groups), },
+ { "uart1", jz4750_uart1_groups, ARRAY_SIZE(jz4750_uart1_groups), },
+ { "uart2", jz4750_uart2_groups, ARRAY_SIZE(jz4750_uart2_groups), },
+ { "uart3", jz4750_uart3_groups, ARRAY_SIZE(jz4750_uart3_groups), },
+ { "mmc0", jz4750_mmc0_groups, ARRAY_SIZE(jz4750_mmc0_groups), },
+ { "mmc1", jz4750_mmc1_groups, ARRAY_SIZE(jz4750_mmc1_groups), },
+ { "i2c", jz4750_i2c_groups, ARRAY_SIZE(jz4750_i2c_groups), },
+ { "cim", jz4750_cim_groups, ARRAY_SIZE(jz4750_cim_groups), },
+ { "lcd", jz4750_lcd_groups, ARRAY_SIZE(jz4750_lcd_groups), },
+ { "nand", jz4750_nand_groups, ARRAY_SIZE(jz4750_nand_groups), },
+ { "pwm0", jz4750_pwm0_groups, ARRAY_SIZE(jz4750_pwm0_groups), },
+ { "pwm1", jz4750_pwm1_groups, ARRAY_SIZE(jz4750_pwm1_groups), },
+ { "pwm2", jz4750_pwm2_groups, ARRAY_SIZE(jz4750_pwm2_groups), },
+ { "pwm3", jz4750_pwm3_groups, ARRAY_SIZE(jz4750_pwm3_groups), },
+ { "pwm4", jz4750_pwm4_groups, ARRAY_SIZE(jz4750_pwm4_groups), },
+ { "pwm5", jz4750_pwm5_groups, ARRAY_SIZE(jz4750_pwm5_groups), },
+};
+
+static const struct ingenic_chip_info jz4750_chip_info = {
+ .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4750,
+ .groups = jz4750_groups,
+ .num_groups = ARRAY_SIZE(jz4750_groups),
+ .functions = jz4750_functions,
+ .num_functions = ARRAY_SIZE(jz4750_functions),
+ .pull_ups = jz4750_pull_ups,
+ .pull_downs = jz4750_pull_downs,
+};
+
+static const u32 jz4755_pull_ups[6] = {
+ 0xffffffff, 0xffffffff, 0x0fffffff, 0xffffffff, 0x33dc3fff, 0x0000fc00,
+};
+
+static const u32 jz4755_pull_downs[6] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static int jz4755_uart0_data_pins[] = { 0x7c, 0x7d, };
+static int jz4755_uart0_hwflow_pins[] = { 0x7e, 0x7f, };
+static int jz4755_uart1_data_pins[] = { 0x97, 0x99, };
+static int jz4755_uart2_data_pins[] = { 0x9f, };
+static int jz4755_mmc0_1bit_pins[] = { 0x2f, 0x50, 0x5c, };
+static int jz4755_mmc0_4bit_pins[] = { 0x5d, 0x5b, 0x51, };
+static int jz4755_mmc1_1bit_pins[] = { 0x3a, 0x3d, 0x3c, };
+static int jz4755_mmc1_4bit_pins[] = { 0x3b, 0x3e, 0x3f, };
+static int jz4755_i2c_pins[] = { 0x8c, 0x8d, };
+static int jz4755_cim_pins[] = {
+ 0x89, 0x8b, 0x8a, 0x88,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+};
+static int jz4755_lcd_8bit_pins[] = {
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x72, 0x73, 0x74,
+};
+static int jz4755_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4755_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4755_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, };
+static int jz4755_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4755_lcd_generic_pins[] = { 0x75, };
+static int jz4755_nand_cs1_pins[] = { 0x55, };
+static int jz4755_nand_cs2_pins[] = { 0x56, };
+static int jz4755_nand_cs3_pins[] = { 0x57, };
+static int jz4755_nand_cs4_pins[] = { 0x58, };
+static int jz4755_nand_fre_fwe_pins[] = { 0x5c, 0x5d, };
+static int jz4755_pwm_pwm0_pins[] = { 0x94, };
+static int jz4755_pwm_pwm1_pins[] = { 0xab, };
+static int jz4755_pwm_pwm2_pins[] = { 0x96, };
+static int jz4755_pwm_pwm3_pins[] = { 0x97, };
+static int jz4755_pwm_pwm4_pins[] = { 0x98, };
+static int jz4755_pwm_pwm5_pins[] = { 0x99, };
+
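+/*
+ * Where the pins of a group do not all sit on the same mux function, a
+ * matching *_funcs array gives the function selector for each pin, in
+ * the same order as the corresponding *_pins array.
+ */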
+static u8 jz4755_mmc0_1bit_funcs[] = { 2, 2, 1, };
+static u8 jz4755_mmc0_4bit_funcs[] = { 1, 0, 1, };
+static u8 jz4755_lcd_24bit_funcs[] = { 1, 1, 1, 1, 0, 0, };
+
+static const struct group_desc jz4755_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4755_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4755_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart2-data", jz4755_uart2_data, 1),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-1bit", jz4755_mmc0_1bit,
+ jz4755_mmc0_1bit_funcs),
+ INGENIC_PIN_GROUP_FUNCS("mmc0-4bit", jz4755_mmc0_4bit,
+ jz4755_mmc0_4bit_funcs),
+ INGENIC_PIN_GROUP("mmc1-1bit", jz4755_mmc1_1bit, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit", jz4755_mmc1_4bit, 1),
+ INGENIC_PIN_GROUP("i2c-data", jz4755_i2c, 0),
+ INGENIC_PIN_GROUP("cim-data", jz4755_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4755_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4755_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4755_lcd_18bit, 0),
+ INGENIC_PIN_GROUP_FUNCS("lcd-24bit", jz4755_lcd_24bit,
+ jz4755_lcd_24bit_funcs),
+ INGENIC_PIN_GROUP("lcd-special", jz4755_lcd_special, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4755_lcd_generic, 0),
+ INGENIC_PIN_GROUP("nand-cs1", jz4755_nand_cs1, 0),
+ INGENIC_PIN_GROUP("nand-cs2", jz4755_nand_cs2, 0),
+ INGENIC_PIN_GROUP("nand-cs3", jz4755_nand_cs3, 0),
+ INGENIC_PIN_GROUP("nand-cs4", jz4755_nand_cs4, 0),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4755_nand_fre_fwe, 0),
+ INGENIC_PIN_GROUP("pwm0", jz4755_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4755_pwm_pwm1, 1),
+ INGENIC_PIN_GROUP("pwm2", jz4755_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4755_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("pwm4", jz4755_pwm_pwm4, 0),
+ INGENIC_PIN_GROUP("pwm5", jz4755_pwm_pwm5, 0),
+};
+
+static const char *jz4755_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4755_uart1_groups[] = { "uart1-data", };
+static const char *jz4755_uart2_groups[] = { "uart2-data", };
+static const char *jz4755_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *jz4755_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *jz4755_i2c_groups[] = { "i2c-data", };
+static const char *jz4755_cim_groups[] = { "cim-data", };
+static const char *jz4755_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
+static const char *jz4755_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
+};
+static const char *jz4755_pwm0_groups[] = { "pwm0", };
+static const char *jz4755_pwm1_groups[] = { "pwm1", };
+static const char *jz4755_pwm2_groups[] = { "pwm2", };
+static const char *jz4755_pwm3_groups[] = { "pwm3", };
+static const char *jz4755_pwm4_groups[] = { "pwm4", };
+static const char *jz4755_pwm5_groups[] = { "pwm5", };
+
+static const struct function_desc jz4755_functions[] = {
+ { "uart0", jz4755_uart0_groups, ARRAY_SIZE(jz4755_uart0_groups), },
+ { "uart1", jz4755_uart1_groups, ARRAY_SIZE(jz4755_uart1_groups), },
+ { "uart2", jz4755_uart2_groups, ARRAY_SIZE(jz4755_uart2_groups), },
+ { "mmc0", jz4755_mmc0_groups, ARRAY_SIZE(jz4755_mmc0_groups), },
+ { "mmc1", jz4755_mmc1_groups, ARRAY_SIZE(jz4755_mmc1_groups), },
+ { "i2c", jz4755_i2c_groups, ARRAY_SIZE(jz4755_i2c_groups), },
+ { "cim", jz4755_cim_groups, ARRAY_SIZE(jz4755_cim_groups), },
+ { "lcd", jz4755_lcd_groups, ARRAY_SIZE(jz4755_lcd_groups), },
+ { "nand", jz4755_nand_groups, ARRAY_SIZE(jz4755_nand_groups), },
+ { "pwm0", jz4755_pwm0_groups, ARRAY_SIZE(jz4755_pwm0_groups), },
+ { "pwm1", jz4755_pwm1_groups, ARRAY_SIZE(jz4755_pwm1_groups), },
+ { "pwm2", jz4755_pwm2_groups, ARRAY_SIZE(jz4755_pwm2_groups), },
+ { "pwm3", jz4755_pwm3_groups, ARRAY_SIZE(jz4755_pwm3_groups), },
+ { "pwm4", jz4755_pwm4_groups, ARRAY_SIZE(jz4755_pwm4_groups), },
+ { "pwm5", jz4755_pwm5_groups, ARRAY_SIZE(jz4755_pwm5_groups), },
+};
+
+static const struct ingenic_chip_info jz4755_chip_info = {
+ .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4755,
+ .groups = jz4755_groups,
+ .num_groups = ARRAY_SIZE(jz4755_groups),
+ .functions = jz4755_functions,
+ .num_functions = ARRAY_SIZE(jz4755_functions),
+ .pull_ups = jz4755_pull_ups,
+ .pull_downs = jz4755_pull_downs,
+};
+
static const u32 jz4760_pull_ups[6] = {
0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
};
@@ -389,7 +775,7 @@ static int jz4760_lcd_18bit_pins[] = {
static int jz4760_lcd_24bit_pins[] = {
0x40, 0x41, 0x4a, 0x4b, 0x54, 0x55,
};
-static int jz4760_lcd_special_pins[] = { 0x40, 0x41, 0x4a, 0x54 };
+static int jz4760_lcd_special_pins[] = { 0x54, 0x4a, 0x41, 0x40, };
static int jz4760_lcd_generic_pins[] = { 0x49, };
static int jz4760_pwm_pwm0_pins[] = { 0x80, };
static int jz4760_pwm_pwm1_pins[] = { 0x81, };
@@ -450,8 +836,8 @@ static const struct group_desc jz4760_groups[] = {
INGENIC_PIN_GROUP("lcd-16bit", jz4760_lcd_16bit, 0),
INGENIC_PIN_GROUP("lcd-18bit", jz4760_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4760_lcd_24bit, 0),
- INGENIC_PIN_GROUP("lcd-generic", jz4760_lcd_generic, 0),
INGENIC_PIN_GROUP("lcd-special", jz4760_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4760_lcd_generic, 0),
INGENIC_PIN_GROUP("pwm0", jz4760_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4760_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4760_pwm_pwm2, 0),
@@ -648,7 +1034,13 @@ static int jz4770_cim_12bit_pins[] = {
};
static int jz4770_lcd_8bit_pins[] = {
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x4c, 0x4d,
- 0x48, 0x49, 0x52, 0x53,
+ 0x48, 0x52, 0x53,
+};
+static int jz4770_lcd_16bit_pins[] = {
+ 0x4e, 0x4f, 0x50, 0x51, 0x56, 0x57, 0x58, 0x59,
+};
+static int jz4770_lcd_18bit_pins[] = {
+ 0x5a, 0x5b,
};
static int jz4770_lcd_24bit_pins[] = {
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
@@ -656,6 +1048,8 @@ static int jz4770_lcd_24bit_pins[] = {
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b,
};
+static int jz4770_lcd_special_pins[] = { 0x54, 0x4a, 0x41, 0x40, };
+static int jz4770_lcd_generic_pins[] = { 0x49, };
static int jz4770_pwm_pwm0_pins[] = { 0x80, };
static int jz4770_pwm_pwm1_pins[] = { 0x81, };
static int jz4770_pwm_pwm2_pins[] = { 0x82, };
@@ -667,7 +1061,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
-static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
+static int jz4770_mac_mii_pins[] = {
+ 0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
+};
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
@@ -754,8 +1150,11 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("cim-data-8bit", jz4770_cim_8bit, 0),
INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit, 0),
INGENIC_PIN_GROUP("lcd-8bit", jz4770_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4770_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4770_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-special", jz4770_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4770_lcd_generic, 0),
INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2, 0),
@@ -816,7 +1215,8 @@ static const char *jz4770_i2c1_groups[] = { "i2c1-data", };
static const char *jz4770_i2c2_groups[] = { "i2c2-data", };
static const char *jz4770_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
static const char *jz4770_lcd_groups[] = {
- "lcd-8bit", "lcd-24bit", "lcd-no-pins",
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
};
static const char *jz4770_pwm0_groups[] = { "pwm0", };
static const char *jz4770_pwm1_groups[] = { "pwm1", };
@@ -874,6 +1274,279 @@ static const struct ingenic_chip_info jz4770_chip_info = {
.pull_downs = jz4770_pull_downs,
};
+static const u32 jz4775_pull_ups[7] = {
+ 0x28ff00ff, 0xf030f3fc, 0x0fffffff, 0xfffe4000, 0xf0f0000c, 0x0000f00f, 0x0000f3c0,
+};
+
+static const u32 jz4775_pull_downs[7] = {
+ 0x00000000, 0x00030c03, 0x00000000, 0x00008000, 0x00000403, 0x00000ff0, 0x00030c00,
+};
+
+static int jz4775_uart0_data_pins[] = { 0xa0, 0xa3, };
+static int jz4775_uart0_hwflow_pins[] = { 0xa1, 0xa2, };
+static int jz4775_uart1_data_pins[] = { 0x7a, 0x7c, };
+static int jz4775_uart1_hwflow_pins[] = { 0x7b, 0x7d, };
+static int jz4775_uart2_data_c_pins[] = { 0x54, 0x4a, };
+static int jz4775_uart2_data_f_pins[] = { 0xa5, 0xa4, };
+static int jz4775_uart3_data_pins[] = { 0x1e, 0x1f, };
+static int jz4775_ssi_dt_a_pins[] = { 0x13, };
+static int jz4775_ssi_dt_d_pins[] = { 0x75, };
+static int jz4775_ssi_dr_a_pins[] = { 0x14, };
+static int jz4775_ssi_dr_d_pins[] = { 0x74, };
+static int jz4775_ssi_clk_a_pins[] = { 0x12, };
+static int jz4775_ssi_clk_d_pins[] = { 0x78, };
+static int jz4775_ssi_gpc_pins[] = { 0x76, };
+static int jz4775_ssi_ce0_a_pins[] = { 0x17, };
+static int jz4775_ssi_ce0_d_pins[] = { 0x79, };
+static int jz4775_ssi_ce1_pins[] = { 0x77, };
+static int jz4775_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
+static int jz4775_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
+static int jz4775_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, };
+static int jz4775_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4775_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4775_mmc1_1bit_d_pins[] = { 0x78, 0x79, 0x74, };
+static int jz4775_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, };
+static int jz4775_mmc1_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4775_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4775_mmc2_1bit_b_pins[] = { 0x3c, 0x3d, 0x34, };
+static int jz4775_mmc2_4bit_b_pins[] = { 0x35, 0x3e, 0x3f, };
+static int jz4775_mmc2_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4775_mmc2_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4775_nemc_8bit_data_pins[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+};
+static int jz4775_nemc_16bit_data_pins[] = {
+ 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1,
+};
+static int jz4775_nemc_cle_ale_pins[] = { 0x20, 0x21, };
+static int jz4775_nemc_addr_pins[] = { 0x22, 0x23, 0x24, 0x25, };
+static int jz4775_nemc_rd_we_pins[] = { 0x10, 0x11, };
+static int jz4775_nemc_frd_fwe_pins[] = { 0x12, 0x13, };
+static int jz4775_nemc_wait_pins[] = { 0x1b, };
+static int jz4775_nemc_cs1_pins[] = { 0x15, };
+static int jz4775_nemc_cs2_pins[] = { 0x16, };
+static int jz4775_nemc_cs3_pins[] = { 0x17, };
+static int jz4775_i2c0_pins[] = { 0x7e, 0x7f, };
+static int jz4775_i2c1_pins[] = { 0x9e, 0x9f, };
+static int jz4775_i2c2_pins[] = { 0x80, 0x83, };
+static int jz4775_i2s_data_tx_pins[] = { 0xa3, };
+static int jz4775_i2s_data_rx_pins[] = { 0xa2, };
+static int jz4775_i2s_clk_txrx_pins[] = { 0xa0, 0xa1, };
+static int jz4775_i2s_sysclk_pins[] = { 0x83, };
+static int jz4775_dmic_pins[] = { 0xaa, 0xab, };
+static int jz4775_cim_pins[] = {
+ 0x26, 0x27, 0x28, 0x29,
+ 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+};
+static int jz4775_lcd_8bit_pins[] = {
+ 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x4c, 0x4d,
+ 0x48, 0x52, 0x53,
+};
+static int jz4775_lcd_16bit_pins[] = {
+ 0x4e, 0x4f, 0x50, 0x51, 0x56, 0x57, 0x58, 0x59,
+};
+static int jz4775_lcd_18bit_pins[] = {
+ 0x5a, 0x5b,
+};
+static int jz4775_lcd_24bit_pins[] = {
+ 0x40, 0x41, 0x4a, 0x4b, 0x54, 0x55,
+};
+static int jz4775_lcd_special_pins[] = { 0x54, 0x4a, 0x41, 0x40, };
+static int jz4775_lcd_generic_pins[] = { 0x49, };
+static int jz4775_pwm_pwm0_pins[] = { 0x80, };
+static int jz4775_pwm_pwm1_pins[] = { 0x81, };
+static int jz4775_pwm_pwm2_pins[] = { 0x82, };
+static int jz4775_pwm_pwm3_pins[] = { 0x83, };
+static int jz4775_mac_rmii_pins[] = {
+ 0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
+};
+static int jz4775_mac_mii_pins[] = {
+ 0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
+};
+static int jz4775_mac_rgmii_pins[] = {
+ 0xa9, 0x7b, 0x7a, 0xab, 0xaa, 0xac, 0x7d, 0x7c, 0xa5, 0xa4,
+ 0xad, 0xae, 0xa7, 0xa6,
+};
+static int jz4775_mac_gmii_pins[] = {
+ 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a,
+ 0xa8, 0x28, 0x24, 0xaf,
+};
+static int jz4775_otg_pins[] = { 0x8a, };
+
+static u8 jz4775_uart3_data_funcs[] = { 0, 1, };
+static u8 jz4775_mac_mii_funcs[] = { 1, 1, 1, 1, 0, 1, 0, };
+static u8 jz4775_mac_rgmii_funcs[] = {
+ 0, 1, 1, 0, 0, 0, 1, 1, 0, 0,
+ 0, 0, 0, 0,
+};
+static u8 jz4775_mac_gmii_funcs[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 1, 1, 0,
+};
+
+static const struct group_desc jz4775_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4775_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4775_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", jz4775_uart1_data, 0),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4775_uart1_hwflow, 0),
+ INGENIC_PIN_GROUP("uart2-data-c", jz4775_uart2_data_c, 2),
+ INGENIC_PIN_GROUP("uart2-data-f", jz4775_uart2_data_f, 1),
+ INGENIC_PIN_GROUP_FUNCS("uart3-data", jz4775_uart3_data,
+ jz4775_uart3_data_funcs),
+ INGENIC_PIN_GROUP("ssi-dt-a", jz4775_ssi_dt_a, 2),
+ INGENIC_PIN_GROUP("ssi-dt-d", jz4775_ssi_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi-dr-a", jz4775_ssi_dr_a, 2),
+ INGENIC_PIN_GROUP("ssi-dr-d", jz4775_ssi_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi-clk-a", jz4775_ssi_clk_a, 2),
+ INGENIC_PIN_GROUP("ssi-clk-d", jz4775_ssi_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi-gpc", jz4775_ssi_gpc, 1),
+ INGENIC_PIN_GROUP("ssi-ce0-a", jz4775_ssi_ce0_a, 2),
+ INGENIC_PIN_GROUP("ssi-ce0-d", jz4775_ssi_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi-ce1", jz4775_ssi_ce1, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-a", jz4775_mmc0_1bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4775_mmc0_4bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-8bit-a", jz4775_mmc0_8bit_a, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4775_mmc0_1bit_e, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4775_mmc0_4bit_e, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4775_mmc1_1bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4775_mmc1_4bit_d, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4775_mmc1_1bit_e, 1),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4775_mmc1_4bit_e, 1),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4775_mmc2_1bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4775_mmc2_4bit_b, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4775_mmc2_1bit_e, 2),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4775_mmc2_4bit_e, 2),
+ INGENIC_PIN_GROUP("nemc-8bit-data", jz4775_nemc_8bit_data, 0),
+ INGENIC_PIN_GROUP("nemc-16bit-data", jz4775_nemc_16bit_data, 1),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4775_nemc_cle_ale, 0),
+ INGENIC_PIN_GROUP("nemc-addr", jz4775_nemc_addr, 0),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4775_nemc_rd_we, 0),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4775_nemc_frd_fwe, 0),
+ INGENIC_PIN_GROUP("nemc-wait", jz4775_nemc_wait, 0),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4775_nemc_cs1, 0),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4775_nemc_cs2, 0),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4775_nemc_cs3, 0),
+ INGENIC_PIN_GROUP("i2c0-data", jz4775_i2c0, 0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4775_i2c1, 0),
+ INGENIC_PIN_GROUP("i2c2-data", jz4775_i2c2, 1),
+ INGENIC_PIN_GROUP("i2s-data-tx", jz4775_i2s_data_tx, 1),
+ INGENIC_PIN_GROUP("i2s-data-rx", jz4775_i2s_data_rx, 1),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", jz4775_i2s_clk_txrx, 1),
+ INGENIC_PIN_GROUP("i2s-sysclk", jz4775_i2s_sysclk, 2),
+ INGENIC_PIN_GROUP("dmic", jz4775_dmic, 1),
+ INGENIC_PIN_GROUP("cim-data", jz4775_cim, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4775_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4775_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4775_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4775_lcd_24bit, 0),
+ INGENIC_PIN_GROUP("lcd-generic", jz4775_lcd_generic, 0),
+ INGENIC_PIN_GROUP("lcd-special", jz4775_lcd_special, 1),
+ INGENIC_PIN_GROUP("pwm0", jz4775_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", jz4775_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", jz4775_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", jz4775_pwm_pwm3, 0),
+ INGENIC_PIN_GROUP("mac-rmii", jz4775_mac_rmii, 0),
+ INGENIC_PIN_GROUP_FUNCS("mac-mii", jz4775_mac_mii,
+ jz4775_mac_mii_funcs),
+ INGENIC_PIN_GROUP_FUNCS("mac-rgmii", jz4775_mac_rgmii,
+ jz4775_mac_rgmii_funcs),
+ INGENIC_PIN_GROUP_FUNCS("mac-gmii", jz4775_mac_gmii,
+ jz4775_mac_gmii_funcs),
+ INGENIC_PIN_GROUP("otg-vbus", jz4775_otg, 0),
+};
+
+static const char *jz4775_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4775_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *jz4775_uart2_groups[] = { "uart2-data-c", "uart2-data-f", };
+static const char *jz4775_uart3_groups[] = { "uart3-data", };
+static const char *jz4775_ssi_groups[] = {
+ "ssi-dt-a", "ssi-dt-d",
+ "ssi-dr-a", "ssi-dr-d",
+ "ssi-clk-a", "ssi-clk-d",
+ "ssi-gpc",
+ "ssi-ce0-a", "ssi-ce0-d",
+ "ssi-ce1",
+};
+static const char *jz4775_mmc0_groups[] = {
+ "mmc0-1bit-a", "mmc0-4bit-a", "mmc0-8bit-a",
+ "mmc0-1bit-e", "mmc0-4bit-e",
+};
+static const char *jz4775_mmc1_groups[] = {
+ "mmc1-1bit-d", "mmc1-4bit-d",
+ "mmc1-1bit-e", "mmc1-4bit-e",
+};
+static const char *jz4775_mmc2_groups[] = {
+ "mmc2-1bit-b", "mmc2-4bit-b",
+ "mmc2-1bit-e", "mmc2-4bit-e",
+};
+static const char *jz4775_nemc_groups[] = {
+ "nemc-8bit-data", "nemc-16bit-data", "nemc-cle-ale",
+ "nemc-addr", "nemc-rd-we", "nemc-frd-fwe", "nemc-wait",
+};
+static const char *jz4775_cs1_groups[] = { "nemc-cs1", };
+static const char *jz4775_cs2_groups[] = { "nemc-cs2", };
+static const char *jz4775_cs3_groups[] = { "nemc-cs3", };
+static const char *jz4775_i2c0_groups[] = { "i2c0-data", };
+static const char *jz4775_i2c1_groups[] = { "i2c1-data", };
+static const char *jz4775_i2c2_groups[] = { "i2c2-data", };
+static const char *jz4775_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
+};
+static const char *jz4775_dmic_groups[] = { "dmic", };
+static const char *jz4775_cim_groups[] = { "cim-data", };
+static const char *jz4775_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
+static const char *jz4775_pwm0_groups[] = { "pwm0", };
+static const char *jz4775_pwm1_groups[] = { "pwm1", };
+static const char *jz4775_pwm2_groups[] = { "pwm2", };
+static const char *jz4775_pwm3_groups[] = { "pwm3", };
+static const char *jz4775_mac_groups[] = {
+ "mac-rmii", "mac-mii", "mac-rgmii", "mac-gmii",
+};
+static const char *jz4775_otg_groups[] = { "otg-vbus", };
+
+static const struct function_desc jz4775_functions[] = {
+ { "uart0", jz4775_uart0_groups, ARRAY_SIZE(jz4775_uart0_groups), },
+ { "uart1", jz4775_uart1_groups, ARRAY_SIZE(jz4775_uart1_groups), },
+ { "uart2", jz4775_uart2_groups, ARRAY_SIZE(jz4775_uart2_groups), },
+ { "uart3", jz4775_uart3_groups, ARRAY_SIZE(jz4775_uart3_groups), },
+ { "ssi", jz4775_ssi_groups, ARRAY_SIZE(jz4775_ssi_groups), },
+ { "mmc0", jz4775_mmc0_groups, ARRAY_SIZE(jz4775_mmc0_groups), },
+ { "mmc1", jz4775_mmc1_groups, ARRAY_SIZE(jz4775_mmc1_groups), },
+ { "mmc2", jz4775_mmc2_groups, ARRAY_SIZE(jz4775_mmc2_groups), },
+ { "nemc", jz4775_nemc_groups, ARRAY_SIZE(jz4775_nemc_groups), },
+ { "nemc-cs1", jz4775_cs1_groups, ARRAY_SIZE(jz4775_cs1_groups), },
+ { "nemc-cs2", jz4775_cs2_groups, ARRAY_SIZE(jz4775_cs2_groups), },
+ { "nemc-cs3", jz4775_cs3_groups, ARRAY_SIZE(jz4775_cs3_groups), },
+ { "i2c0", jz4775_i2c0_groups, ARRAY_SIZE(jz4775_i2c0_groups), },
+ { "i2c1", jz4775_i2c1_groups, ARRAY_SIZE(jz4775_i2c1_groups), },
+ { "i2c2", jz4775_i2c2_groups, ARRAY_SIZE(jz4775_i2c2_groups), },
+ { "i2s", jz4775_i2s_groups, ARRAY_SIZE(jz4775_i2s_groups), },
+ { "dmic", jz4775_dmic_groups, ARRAY_SIZE(jz4775_dmic_groups), },
+ { "cim", jz4775_cim_groups, ARRAY_SIZE(jz4775_cim_groups), },
+ { "lcd", jz4775_lcd_groups, ARRAY_SIZE(jz4775_lcd_groups), },
+ { "pwm0", jz4775_pwm0_groups, ARRAY_SIZE(jz4775_pwm0_groups), },
+ { "pwm1", jz4775_pwm1_groups, ARRAY_SIZE(jz4775_pwm1_groups), },
+ { "pwm2", jz4775_pwm2_groups, ARRAY_SIZE(jz4775_pwm2_groups), },
+ { "pwm3", jz4775_pwm3_groups, ARRAY_SIZE(jz4775_pwm3_groups), },
+ { "mac", jz4775_mac_groups, ARRAY_SIZE(jz4775_mac_groups), },
+ { "otg", jz4775_otg_groups, ARRAY_SIZE(jz4775_otg_groups), },
+};
+
+static const struct ingenic_chip_info jz4775_chip_info = {
+ .num_chips = 7,
+ .reg_offset = 0x100,
+ .version = ID_JZ4775,
+ .groups = jz4775_groups,
+ .num_groups = ARRAY_SIZE(jz4775_groups),
+ .functions = jz4775_functions,
+ .num_functions = ARRAY_SIZE(jz4775_functions),
+ .pull_ups = jz4775_pull_ups,
+ .pull_downs = jz4775_pull_downs,
+};
+
static const u32 jz4780_pull_ups[6] = {
0x3fffffff, 0xfff0f3fc, 0x0fffffff, 0xffff4fff, 0xfffffb7c, 0x7fa7f00f,
};
@@ -927,6 +1600,7 @@ static int jz4780_i2s_data_rx_pins[] = { 0x86, };
static int jz4780_i2s_clk_txrx_pins[] = { 0x6c, 0x6d, };
static int jz4780_i2s_clk_rx_pins[] = { 0x88, 0x89, };
static int jz4780_i2s_sysclk_pins[] = { 0x85, };
+static int jz4780_dmic_pins[] = { 0x32, 0x33, };
static int jz4780_hdmi_ddc_pins[] = { 0xb9, 0xb8, };
static u8 jz4780_i2s_clk_txrx_funcs[] = { 1, 0, };
@@ -1025,11 +1699,16 @@ static const struct group_desc jz4780_groups[] = {
jz4780_i2s_clk_txrx_funcs),
INGENIC_PIN_GROUP("i2s-clk-rx", jz4780_i2s_clk_rx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", jz4780_i2s_sysclk, 2),
+ INGENIC_PIN_GROUP("dmic", jz4780_dmic, 1),
INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc, 0),
INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit, 0),
INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4770_lcd_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4770_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4770_lcd_18bit, 0),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit, 0),
- { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("lcd-special", jz4770_lcd_special, 1),
+ INGENIC_PIN_GROUP("lcd-generic", jz4770_lcd_generic, 0),
INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1, 0),
INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2, 0),
@@ -1077,6 +1756,7 @@ static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
static const char *jz4780_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
};
+static const char *jz4780_dmic_groups[] = { "dmic", };
static const char *jz4780_cim_groups[] = { "cim-data", };
static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
@@ -1104,6 +1784,7 @@ static const struct function_desc jz4780_functions[] = {
{ "i2c3", jz4780_i2c3_groups, ARRAY_SIZE(jz4780_i2c3_groups), },
{ "i2c4", jz4780_i2c4_groups, ARRAY_SIZE(jz4780_i2c4_groups), },
{ "i2s", jz4780_i2s_groups, ARRAY_SIZE(jz4780_i2s_groups), },
+ { "dmic", jz4780_dmic_groups, ARRAY_SIZE(jz4780_dmic_groups), },
{ "cim", jz4780_cim_groups, ARRAY_SIZE(jz4780_cim_groups), },
{ "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
{ "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
@@ -1189,6 +1870,8 @@ static int x1000_i2s_data_tx_pins[] = { 0x24, };
static int x1000_i2s_data_rx_pins[] = { 0x23, };
static int x1000_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
static int x1000_i2s_sysclk_pins[] = { 0x20, };
+static int x1000_dmic0_pins[] = { 0x35, 0x36, };
+static int x1000_dmic1_pins[] = { 0x25, };
static int x1000_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1254,10 +1937,11 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("i2s-data-rx", x1000_i2s_data_rx, 1),
INGENIC_PIN_GROUP("i2s-clk-txrx", x1000_i2s_clk_txrx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", x1000_i2s_sysclk, 1),
+ INGENIC_PIN_GROUP("dmic0", x1000_dmic0, 0),
+ INGENIC_PIN_GROUP("dmic1", x1000_dmic1, 1),
INGENIC_PIN_GROUP("cim-data", x1000_cim, 2),
INGENIC_PIN_GROUP("lcd-8bit", x1000_lcd_8bit, 1),
INGENIC_PIN_GROUP("lcd-16bit", x1000_lcd_16bit, 1),
- { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0", x1000_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", x1000_pwm_pwm1, 1),
INGENIC_PIN_GROUP("pwm2", x1000_pwm_pwm2, 1),
@@ -1298,10 +1982,9 @@ static const char *x1000_i2c2_groups[] = { "i2c2-data", };
static const char *x1000_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
};
+static const char *x1000_dmic_groups[] = { "dmic0", "dmic1", };
static const char *x1000_cim_groups[] = { "cim-data", };
-static const char *x1000_lcd_groups[] = {
- "lcd-8bit", "lcd-16bit", "lcd-no-pins",
-};
+static const char *x1000_lcd_groups[] = { "lcd-8bit", "lcd-16bit", };
static const char *x1000_pwm0_groups[] = { "pwm0", };
static const char *x1000_pwm1_groups[] = { "pwm1", };
static const char *x1000_pwm2_groups[] = { "pwm2", };
@@ -1324,6 +2007,7 @@ static const struct function_desc x1000_functions[] = {
{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
{ "i2s", x1000_i2s_groups, ARRAY_SIZE(x1000_i2s_groups), },
+ { "dmic", x1000_dmic_groups, ARRAY_SIZE(x1000_dmic_groups), },
{ "cim", x1000_cim_groups, ARRAY_SIZE(x1000_cim_groups), },
{ "lcd", x1000_lcd_groups, ARRAY_SIZE(x1000_lcd_groups), },
{ "pwm0", x1000_pwm0_groups, ARRAY_SIZE(x1000_pwm0_groups), },
@@ -1363,6 +2047,8 @@ static int x1500_i2s_data_tx_pins[] = { 0x24, };
static int x1500_i2s_data_rx_pins[] = { 0x23, };
static int x1500_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
static int x1500_i2s_sysclk_pins[] = { 0x20, };
+static int x1500_dmic0_pins[] = { 0x35, 0x36, };
+static int x1500_dmic1_pins[] = { 0x25, };
static int x1500_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1392,8 +2078,9 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("i2s-data-rx", x1500_i2s_data_rx, 1),
INGENIC_PIN_GROUP("i2s-clk-txrx", x1500_i2s_clk_txrx, 1),
INGENIC_PIN_GROUP("i2s-sysclk", x1500_i2s_sysclk, 1),
+ INGENIC_PIN_GROUP("dmic0", x1500_dmic0, 0),
+ INGENIC_PIN_GROUP("dmic1", x1500_dmic1, 1),
INGENIC_PIN_GROUP("cim-data", x1500_cim, 2),
- { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0", x1500_pwm_pwm0, 0),
INGENIC_PIN_GROUP("pwm1", x1500_pwm_pwm1, 1),
INGENIC_PIN_GROUP("pwm2", x1500_pwm_pwm2, 1),
@@ -1413,8 +2100,8 @@ static const char *x1500_i2c2_groups[] = { "i2c2-data", };
static const char *x1500_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
};
+static const char *x1500_dmic_groups[] = { "dmic0", "dmic1", };
static const char *x1500_cim_groups[] = { "cim-data", };
-static const char *x1500_lcd_groups[] = { "lcd-no-pins", };
static const char *x1500_pwm0_groups[] = { "pwm0", };
static const char *x1500_pwm1_groups[] = { "pwm1", };
static const char *x1500_pwm2_groups[] = { "pwm2", };
@@ -1431,8 +2118,8 @@ static const struct function_desc x1500_functions[] = {
{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
{ "i2s", x1500_i2s_groups, ARRAY_SIZE(x1500_i2s_groups), },
+ { "dmic", x1500_dmic_groups, ARRAY_SIZE(x1500_dmic_groups), },
{ "cim", x1500_cim_groups, ARRAY_SIZE(x1500_cim_groups), },
- { "lcd", x1500_lcd_groups, ARRAY_SIZE(x1500_lcd_groups), },
{ "pwm0", x1500_pwm0_groups, ARRAY_SIZE(x1500_pwm0_groups), },
{ "pwm1", x1500_pwm1_groups, ARRAY_SIZE(x1500_pwm1_groups), },
{ "pwm2", x1500_pwm2_groups, ARRAY_SIZE(x1500_pwm2_groups), },
@@ -1471,16 +2158,16 @@ static int x1830_ssi0_gpc_pins[] = { 0x4d, };
static int x1830_ssi0_ce0_pins[] = { 0x50, };
static int x1830_ssi0_ce1_pins[] = { 0x4e, };
static int x1830_ssi1_dt_c_pins[] = { 0x53, };
-static int x1830_ssi1_dr_c_pins[] = { 0x54, };
-static int x1830_ssi1_clk_c_pins[] = { 0x57, };
-static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
-static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
-static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
static int x1830_ssi1_dt_d_pins[] = { 0x62, };
+static int x1830_ssi1_dr_c_pins[] = { 0x54, };
static int x1830_ssi1_dr_d_pins[] = { 0x63, };
+static int x1830_ssi1_clk_c_pins[] = { 0x57, };
static int x1830_ssi1_clk_d_pins[] = { 0x66, };
+static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
static int x1830_ssi1_gpc_d_pins[] = { 0x64, };
+static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
static int x1830_ssi1_ce0_d_pins[] = { 0x67, };
+static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
static int x1830_ssi1_ce1_d_pins[] = { 0x65, };
static int x1830_mmc0_1bit_pins[] = { 0x24, 0x25, 0x20, };
static int x1830_mmc0_4bit_pins[] = { 0x21, 0x22, 0x23, };
@@ -1494,11 +2181,15 @@ static int x1830_i2s_data_rx_pins[] = { 0x54, };
static int x1830_i2s_clk_txrx_pins[] = { 0x58, 0x52, };
static int x1830_i2s_clk_rx_pins[] = { 0x56, 0x55, };
static int x1830_i2s_sysclk_pins[] = { 0x57, };
-static int x1830_lcd_rgb_18bit_pins[] = {
+static int x1830_dmic0_pins[] = { 0x48, 0x59, };
+static int x1830_dmic1_pins[] = { 0x5a, };
+static int x1830_lcd_tft_8bit_pins[] = {
0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x6c, 0x6d, 0x6e, 0x6f,
- 0x70, 0x71, 0x72, 0x73, 0x76, 0x77,
- 0x78, 0x79, 0x7a, 0x7b,
+ 0x68, 0x73, 0x72, 0x69,
+};
+static int x1830_lcd_tft_24bit_pins[] = {
+ 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+ 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
};
static int x1830_lcd_slcd_8bit_pins[] = {
0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x6c, 0x6d,
@@ -1562,10 +2253,12 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("i2s-clk-txrx", x1830_i2s_clk_txrx, 0),
INGENIC_PIN_GROUP("i2s-clk-rx", x1830_i2s_clk_rx, 0),
INGENIC_PIN_GROUP("i2s-sysclk", x1830_i2s_sysclk, 0),
- INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit, 0),
+ INGENIC_PIN_GROUP("dmic0", x1830_dmic0, 2),
+ INGENIC_PIN_GROUP("dmic1", x1830_dmic1, 2),
+ INGENIC_PIN_GROUP("lcd-tft-8bit", x1830_lcd_tft_8bit, 0),
+ INGENIC_PIN_GROUP("lcd-tft-24bit", x1830_lcd_tft_24bit, 0),
INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit, 1),
INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit, 1),
- { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b, 0),
INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c, 1),
INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b, 0),
@@ -1607,8 +2300,9 @@ static const char *x1830_i2c2_groups[] = { "i2c2-data", };
static const char *x1830_i2s_groups[] = {
"i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
};
+static const char *x1830_dmic_groups[] = { "dmic0", "dmic1", };
static const char *x1830_lcd_groups[] = {
- "lcd-rgb-18bit", "lcd-slcd-8bit", "lcd-slcd-16bit", "lcd-no-pins",
+ "lcd-tft-8bit", "lcd-tft-24bit", "lcd-slcd-8bit", "lcd-slcd-16bit",
};
static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
@@ -1632,6 +2326,7 @@ static const struct function_desc x1830_functions[] = {
{ "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
{ "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
{ "i2s", x1830_i2s_groups, ARRAY_SIZE(x1830_i2s_groups), },
+ { "dmic", x1830_dmic_groups, ARRAY_SIZE(x1830_dmic_groups), },
{ "lcd", x1830_lcd_groups, ARRAY_SIZE(x1830_lcd_groups), },
{ "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
{ "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
@@ -1656,6 +2351,456 @@ static const struct ingenic_chip_info x1830_chip_info = {
.pull_downs = x1830_pull_downs,
};
+static const u32 x2000_pull_ups[5] = {
+ 0x0003ffff, 0xffffffff, 0x1ff0ffff, 0xc7fe3f3f, 0x8fff003f,
+};
+
+static const u32 x2000_pull_downs[5] = {
+ 0x0003ffff, 0xffffffff, 0x1ff0ffff, 0x00000000, 0x8fff003f,
+};
+
+static int x2000_uart0_data_pins[] = { 0x77, 0x78, };
+static int x2000_uart0_hwflow_pins[] = { 0x79, 0x7a, };
+static int x2000_uart1_data_pins[] = { 0x57, 0x58, };
+static int x2000_uart1_hwflow_pins[] = { 0x55, 0x56, };
+static int x2000_uart2_data_pins[] = { 0x7e, 0x7f, };
+static int x2000_uart3_data_c_pins[] = { 0x59, 0x5a, };
+static int x2000_uart3_data_d_pins[] = { 0x62, 0x63, };
+static int x2000_uart3_hwflow_c_pins[] = { 0x5b, 0x5c, };
+static int x2000_uart3_hwflow_d_pins[] = { 0x60, 0x61, };
+static int x2000_uart4_data_a_pins[] = { 0x02, 0x03, };
+static int x2000_uart4_data_c_pins[] = { 0x4b, 0x4c, };
+static int x2000_uart4_hwflow_a_pins[] = { 0x00, 0x01, };
+static int x2000_uart4_hwflow_c_pins[] = { 0x49, 0x4a, };
+static int x2000_uart5_data_a_pins[] = { 0x04, 0x05, };
+static int x2000_uart5_data_c_pins[] = { 0x45, 0x46, };
+static int x2000_uart6_data_a_pins[] = { 0x06, 0x07, };
+static int x2000_uart6_data_c_pins[] = { 0x47, 0x48, };
+static int x2000_uart7_data_a_pins[] = { 0x08, 0x09, };
+static int x2000_uart7_data_c_pins[] = { 0x41, 0x42, };
+static int x2000_uart8_data_pins[] = { 0x3c, 0x3d, };
+static int x2000_uart9_data_pins[] = { 0x3e, 0x3f, };
+static int x2000_sfc0_d_pins[] = { 0x73, 0x74, 0x75, 0x76, 0x71, 0x72, };
+static int x2000_sfc0_e_pins[] = { 0x92, 0x93, 0x94, 0x95, 0x90, 0x91, };
+static int x2000_sfc1_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
+static int x2000_ssi0_dt_b_pins[] = { 0x3e, };
+static int x2000_ssi0_dt_d_pins[] = { 0x69, };
+static int x2000_ssi0_dr_b_pins[] = { 0x3d, };
+static int x2000_ssi0_dr_d_pins[] = { 0x6a, };
+static int x2000_ssi0_clk_b_pins[] = { 0x3f, };
+static int x2000_ssi0_clk_d_pins[] = { 0x68, };
+static int x2000_ssi0_ce0_b_pins[] = { 0x3c, };
+static int x2000_ssi0_ce0_d_pins[] = { 0x6d, };
+static int x2000_ssi1_dt_c_pins[] = { 0x4b, };
+static int x2000_ssi1_dt_d_pins[] = { 0x72, };
+static int x2000_ssi1_dt_e_pins[] = { 0x91, };
+static int x2000_ssi1_dr_c_pins[] = { 0x4a, };
+static int x2000_ssi1_dr_d_pins[] = { 0x73, };
+static int x2000_ssi1_dr_e_pins[] = { 0x92, };
+static int x2000_ssi1_clk_c_pins[] = { 0x4c, };
+static int x2000_ssi1_clk_d_pins[] = { 0x71, };
+static int x2000_ssi1_clk_e_pins[] = { 0x90, };
+static int x2000_ssi1_ce0_c_pins[] = { 0x49, };
+static int x2000_ssi1_ce0_d_pins[] = { 0x76, };
+static int x2000_ssi1_ce0_e_pins[] = { 0x95, };
+static int x2000_mmc0_1bit_pins[] = { 0x71, 0x72, 0x73, };
+static int x2000_mmc0_4bit_pins[] = { 0x74, 0x75, 0x76, };
+static int x2000_mmc0_8bit_pins[] = { 0x77, 0x78, 0x79, 0x7a, };
+static int x2000_mmc1_1bit_pins[] = { 0x68, 0x69, 0x6a, };
+static int x2000_mmc1_4bit_pins[] = { 0x6b, 0x6c, 0x6d, };
+static int x2000_mmc2_1bit_pins[] = { 0x80, 0x81, 0x82, };
+static int x2000_mmc2_4bit_pins[] = { 0x83, 0x84, 0x85, };
+static int x2000_emc_8bit_data_pins[] = {
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+};
+static int x2000_emc_16bit_data_pins[] = {
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+};
+static int x2000_emc_addr_pins[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c,
+};
+static int x2000_emc_rd_we_pins[] = { 0x2d, 0x2e, };
+static int x2000_emc_wait_pins[] = { 0x2f, };
+static int x2000_emc_cs1_pins[] = { 0x57, };
+static int x2000_emc_cs2_pins[] = { 0x58, };
+static int x2000_i2c0_pins[] = { 0x4e, 0x4d, };
+static int x2000_i2c1_c_pins[] = { 0x58, 0x57, };
+static int x2000_i2c1_d_pins[] = { 0x6c, 0x6b, };
+static int x2000_i2c2_b_pins[] = { 0x37, 0x36, };
+static int x2000_i2c2_d_pins[] = { 0x75, 0x74, };
+static int x2000_i2c2_e_pins[] = { 0x94, 0x93, };
+static int x2000_i2c3_a_pins[] = { 0x11, 0x10, };
+static int x2000_i2c3_d_pins[] = { 0x7f, 0x7e, };
+static int x2000_i2c4_c_pins[] = { 0x5a, 0x59, };
+static int x2000_i2c4_d_pins[] = { 0x61, 0x60, };
+static int x2000_i2c5_c_pins[] = { 0x5c, 0x5b, };
+static int x2000_i2c5_d_pins[] = { 0x65, 0x64, };
+static int x2000_i2s1_data_tx_pins[] = { 0x47, };
+static int x2000_i2s1_data_rx_pins[] = { 0x44, };
+static int x2000_i2s1_clk_tx_pins[] = { 0x45, 0x46, };
+static int x2000_i2s1_clk_rx_pins[] = { 0x42, 0x43, };
+static int x2000_i2s1_sysclk_tx_pins[] = { 0x48, };
+static int x2000_i2s1_sysclk_rx_pins[] = { 0x41, };
+static int x2000_i2s2_data_rx0_pins[] = { 0x0a, };
+static int x2000_i2s2_data_rx1_pins[] = { 0x0b, };
+static int x2000_i2s2_data_rx2_pins[] = { 0x0c, };
+static int x2000_i2s2_data_rx3_pins[] = { 0x0d, };
+static int x2000_i2s2_clk_rx_pins[] = { 0x11, 0x09, };
+static int x2000_i2s2_sysclk_rx_pins[] = { 0x07, };
+static int x2000_i2s3_data_tx0_pins[] = { 0x03, };
+static int x2000_i2s3_data_tx1_pins[] = { 0x04, };
+static int x2000_i2s3_data_tx2_pins[] = { 0x05, };
+static int x2000_i2s3_data_tx3_pins[] = { 0x06, };
+static int x2000_i2s3_clk_tx_pins[] = { 0x10, 0x02, };
+static int x2000_i2s3_sysclk_tx_pins[] = { 0x00, };
+static int x2000_dmic0_pins[] = { 0x54, 0x55, };
+static int x2000_dmic1_pins[] = { 0x56, };
+static int x2000_dmic2_pins[] = { 0x57, };
+static int x2000_dmic3_pins[] = { 0x58, };
+static int x2000_cim_8bit_pins[] = {
+ 0x0e, 0x0c, 0x0d, 0x4f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+};
+static int x2000_cim_12bit_pins[] = { 0x08, 0x09, 0x0a, 0x0b, };
+static int x2000_lcd_tft_8bit_pins[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x38, 0x3a, 0x39, 0x3b,
+};
+static int x2000_lcd_tft_16bit_pins[] = {
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+};
+static int x2000_lcd_tft_18bit_pins[] = {
+ 0x30, 0x31,
+};
+static int x2000_lcd_tft_24bit_pins[] = {
+ 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+};
+static int x2000_lcd_slcd_8bit_pins[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x3a, 0x38, 0x3b, 0x30, 0x39,
+};
+static int x2000_pwm_pwm0_c_pins[] = { 0x40, };
+static int x2000_pwm_pwm0_d_pins[] = { 0x7e, };
+static int x2000_pwm_pwm1_c_pins[] = { 0x41, };
+static int x2000_pwm_pwm1_d_pins[] = { 0x7f, };
+static int x2000_pwm_pwm2_c_pins[] = { 0x42, };
+static int x2000_pwm_pwm2_e_pins[] = { 0x80, };
+static int x2000_pwm_pwm3_c_pins[] = { 0x43, };
+static int x2000_pwm_pwm3_e_pins[] = { 0x81, };
+static int x2000_pwm_pwm4_c_pins[] = { 0x44, };
+static int x2000_pwm_pwm4_e_pins[] = { 0x82, };
+static int x2000_pwm_pwm5_c_pins[] = { 0x45, };
+static int x2000_pwm_pwm5_e_pins[] = { 0x83, };
+static int x2000_pwm_pwm6_c_pins[] = { 0x46, };
+static int x2000_pwm_pwm6_e_pins[] = { 0x84, };
+static int x2000_pwm_pwm7_c_pins[] = { 0x47, };
+static int x2000_pwm_pwm7_e_pins[] = { 0x85, };
+static int x2000_pwm_pwm8_pins[] = { 0x48, };
+static int x2000_pwm_pwm9_pins[] = { 0x49, };
+static int x2000_pwm_pwm10_pins[] = { 0x4a, };
+static int x2000_pwm_pwm11_pins[] = { 0x4b, };
+static int x2000_pwm_pwm12_pins[] = { 0x4c, };
+static int x2000_pwm_pwm13_pins[] = { 0x4d, };
+static int x2000_pwm_pwm14_pins[] = { 0x4e, };
+static int x2000_pwm_pwm15_pins[] = { 0x4f, };
+static int x2000_mac0_rmii_pins[] = {
+ 0x4b, 0x47, 0x46, 0x4a, 0x43, 0x42, 0x4c, 0x4d, 0x4e, 0x41,
+};
+static int x2000_mac0_rgmii_pins[] = {
+ 0x4b, 0x49, 0x48, 0x47, 0x46, 0x4a, 0x45, 0x44, 0x43, 0x42,
+ 0x4c, 0x4d, 0x4f, 0x4e, 0x41,
+};
+static int x2000_mac1_rmii_pins[] = {
+ 0x32, 0x2d, 0x2c, 0x31, 0x29, 0x28, 0x33, 0x34, 0x35, 0x37,
+};
+static int x2000_mac1_rgmii_pins[] = {
+ 0x32, 0x2f, 0x2e, 0x2d, 0x2c, 0x31, 0x2b, 0x2a, 0x29, 0x28,
+ 0x33, 0x34, 0x36, 0x35, 0x37,
+};
+static int x2000_otg_pins[] = { 0x96, };
+
+static u8 x2000_cim_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, };
+
+static const struct group_desc x2000_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x2000_uart0_data, 2),
+ INGENIC_PIN_GROUP("uart0-hwflow", x2000_uart0_hwflow, 2),
+ INGENIC_PIN_GROUP("uart1-data", x2000_uart1_data, 1),
+ INGENIC_PIN_GROUP("uart1-hwflow", x2000_uart1_hwflow, 1),
+ INGENIC_PIN_GROUP("uart2-data", x2000_uart2_data, 0),
+ INGENIC_PIN_GROUP("uart3-data-c", x2000_uart3_data_c, 0),
+ INGENIC_PIN_GROUP("uart3-data-d", x2000_uart3_data_d, 1),
+ INGENIC_PIN_GROUP("uart3-hwflow-c", x2000_uart3_hwflow_c, 0),
+ INGENIC_PIN_GROUP("uart3-hwflow-d", x2000_uart3_hwflow_d, 1),
+ INGENIC_PIN_GROUP("uart4-data-a", x2000_uart4_data_a, 1),
+ INGENIC_PIN_GROUP("uart4-data-c", x2000_uart4_data_c, 3),
+ INGENIC_PIN_GROUP("uart4-hwflow-a", x2000_uart4_hwflow_a, 1),
+ INGENIC_PIN_GROUP("uart4-hwflow-c", x2000_uart4_hwflow_c, 3),
+ INGENIC_PIN_GROUP("uart5-data-a", x2000_uart5_data_a, 1),
+ INGENIC_PIN_GROUP("uart5-data-c", x2000_uart5_data_c, 3),
+ INGENIC_PIN_GROUP("uart6-data-a", x2000_uart6_data_a, 1),
+ INGENIC_PIN_GROUP("uart6-data-c", x2000_uart6_data_c, 3),
+ INGENIC_PIN_GROUP("uart7-data-a", x2000_uart7_data_a, 1),
+ INGENIC_PIN_GROUP("uart7-data-c", x2000_uart7_data_c, 3),
+ INGENIC_PIN_GROUP("uart8-data", x2000_uart8_data, 3),
+ INGENIC_PIN_GROUP("uart9-data", x2000_uart9_data, 3),
+ INGENIC_PIN_GROUP("sfc0-d", x2000_sfc0_d, 1),
+ INGENIC_PIN_GROUP("sfc0-e", x2000_sfc0_e, 0),
+ INGENIC_PIN_GROUP("sfc1", x2000_sfc1, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-b", x2000_ssi0_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dt-d", x2000_ssi0_dt_d, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-b", x2000_ssi0_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi0-dr-d", x2000_ssi0_dr_d, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-b", x2000_ssi0_clk_b, 1),
+ INGENIC_PIN_GROUP("ssi0-clk-d", x2000_ssi0_clk_d, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", x2000_ssi0_ce0_b, 1),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", x2000_ssi0_ce0_d, 1),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x2000_ssi1_dt_c, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x2000_ssi1_dt_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dt-e", x2000_ssi1_dt_e, 1),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x2000_ssi1_dr_c, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x2000_ssi1_dr_d, 2),
+ INGENIC_PIN_GROUP("ssi1-dr-e", x2000_ssi1_dr_e, 1),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x2000_ssi1_clk_c, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x2000_ssi1_clk_d, 2),
+ INGENIC_PIN_GROUP("ssi1-clk-e", x2000_ssi1_clk_e, 1),
+ INGENIC_PIN_GROUP("ssi1-ce0-c", x2000_ssi1_ce0_c, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", x2000_ssi1_ce0_d, 2),
+ INGENIC_PIN_GROUP("ssi1-ce0-e", x2000_ssi1_ce0_e, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit", x2000_mmc0_1bit, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit", x2000_mmc0_4bit, 0),
+ INGENIC_PIN_GROUP("mmc0-8bit", x2000_mmc0_8bit, 0),
+ INGENIC_PIN_GROUP("mmc1-1bit", x2000_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", x2000_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("mmc2-1bit", x2000_mmc2_1bit, 0),
+ INGENIC_PIN_GROUP("mmc2-4bit", x2000_mmc2_4bit, 0),
+ INGENIC_PIN_GROUP("emc-8bit-data", x2000_emc_8bit_data, 0),
+ INGENIC_PIN_GROUP("emc-16bit-data", x2000_emc_16bit_data, 0),
+ INGENIC_PIN_GROUP("emc-addr", x2000_emc_addr, 0),
+ INGENIC_PIN_GROUP("emc-rd-we", x2000_emc_rd_we, 0),
+ INGENIC_PIN_GROUP("emc-wait", x2000_emc_wait, 0),
+ INGENIC_PIN_GROUP("emc-cs1", x2000_emc_cs1, 3),
+ INGENIC_PIN_GROUP("emc-cs2", x2000_emc_cs2, 3),
+ INGENIC_PIN_GROUP("i2c0-data", x2000_i2c0, 3),
+ INGENIC_PIN_GROUP("i2c1-data-c", x2000_i2c1_c, 2),
+ INGENIC_PIN_GROUP("i2c1-data-d", x2000_i2c1_d, 1),
+ INGENIC_PIN_GROUP("i2c2-data-b", x2000_i2c2_b, 2),
+ INGENIC_PIN_GROUP("i2c2-data-d", x2000_i2c2_d, 2),
+ INGENIC_PIN_GROUP("i2c2-data-e", x2000_i2c2_e, 1),
+ INGENIC_PIN_GROUP("i2c3-data-a", x2000_i2c3_a, 0),
+ INGENIC_PIN_GROUP("i2c3-data-d", x2000_i2c3_d, 1),
+ INGENIC_PIN_GROUP("i2c4-data-c", x2000_i2c4_c, 1),
+ INGENIC_PIN_GROUP("i2c4-data-d", x2000_i2c4_d, 2),
+ INGENIC_PIN_GROUP("i2c5-data-c", x2000_i2c5_c, 1),
+ INGENIC_PIN_GROUP("i2c5-data-d", x2000_i2c5_d, 1),
+ INGENIC_PIN_GROUP("i2s1-data-tx", x2000_i2s1_data_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-data-rx", x2000_i2s1_data_rx, 2),
+ INGENIC_PIN_GROUP("i2s1-clk-tx", x2000_i2s1_clk_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-clk-rx", x2000_i2s1_clk_rx, 2),
+ INGENIC_PIN_GROUP("i2s1-sysclk-tx", x2000_i2s1_sysclk_tx, 2),
+ INGENIC_PIN_GROUP("i2s1-sysclk-rx", x2000_i2s1_sysclk_rx, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx0", x2000_i2s2_data_rx0, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx1", x2000_i2s2_data_rx1, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx2", x2000_i2s2_data_rx2, 2),
+ INGENIC_PIN_GROUP("i2s2-data-rx3", x2000_i2s2_data_rx3, 2),
+ INGENIC_PIN_GROUP("i2s2-clk-rx", x2000_i2s2_clk_rx, 2),
+ INGENIC_PIN_GROUP("i2s2-sysclk-rx", x2000_i2s2_sysclk_rx, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx0", x2000_i2s3_data_tx0, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx1", x2000_i2s3_data_tx1, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx2", x2000_i2s3_data_tx2, 2),
+ INGENIC_PIN_GROUP("i2s3-data-tx3", x2000_i2s3_data_tx3, 2),
+ INGENIC_PIN_GROUP("i2s3-clk-tx", x2000_i2s3_clk_tx, 2),
+ INGENIC_PIN_GROUP("i2s3-sysclk-tx", x2000_i2s3_sysclk_tx, 2),
+ INGENIC_PIN_GROUP("dmic0", x2000_dmic0, 0),
+ INGENIC_PIN_GROUP("dmic1", x2000_dmic1, 0),
+ INGENIC_PIN_GROUP("dmic2", x2000_dmic2, 0),
+ INGENIC_PIN_GROUP("dmic3", x2000_dmic3, 0),
+ INGENIC_PIN_GROUP_FUNCS("cim-data-8bit", x2000_cim_8bit,
+ x2000_cim_8bit_funcs),
+ INGENIC_PIN_GROUP("cim-data-12bit", x2000_cim_12bit, 0),
+ INGENIC_PIN_GROUP("lcd-tft-8bit", x2000_lcd_tft_8bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-16bit", x2000_lcd_tft_16bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-18bit", x2000_lcd_tft_18bit, 1),
+ INGENIC_PIN_GROUP("lcd-tft-24bit", x2000_lcd_tft_24bit, 1),
+ INGENIC_PIN_GROUP("lcd-slcd-8bit", x2000_lcd_slcd_8bit, 2),
+ INGENIC_PIN_GROUP("lcd-slcd-16bit", x2000_lcd_tft_16bit, 2),
+ INGENIC_PIN_GROUP("pwm0-c", x2000_pwm_pwm0_c, 0),
+ INGENIC_PIN_GROUP("pwm0-d", x2000_pwm_pwm0_d, 2),
+ INGENIC_PIN_GROUP("pwm1-c", x2000_pwm_pwm1_c, 0),
+ INGENIC_PIN_GROUP("pwm1-d", x2000_pwm_pwm1_d, 2),
+ INGENIC_PIN_GROUP("pwm2-c", x2000_pwm_pwm2_c, 0),
+ INGENIC_PIN_GROUP("pwm2-e", x2000_pwm_pwm2_e, 1),
+ INGENIC_PIN_GROUP("pwm3-c", x2000_pwm_pwm3_c, 0),
+ INGENIC_PIN_GROUP("pwm3-e", x2000_pwm_pwm3_e, 1),
+ INGENIC_PIN_GROUP("pwm4-c", x2000_pwm_pwm4_c, 0),
+ INGENIC_PIN_GROUP("pwm4-e", x2000_pwm_pwm4_e, 1),
+ INGENIC_PIN_GROUP("pwm5-c", x2000_pwm_pwm5_c, 0),
+ INGENIC_PIN_GROUP("pwm5-e", x2000_pwm_pwm5_e, 1),
+ INGENIC_PIN_GROUP("pwm6-c", x2000_pwm_pwm6_c, 0),
+ INGENIC_PIN_GROUP("pwm6-e", x2000_pwm_pwm6_e, 1),
+ INGENIC_PIN_GROUP("pwm7-c", x2000_pwm_pwm7_c, 0),
+ INGENIC_PIN_GROUP("pwm7-e", x2000_pwm_pwm7_e, 1),
+ INGENIC_PIN_GROUP("pwm8", x2000_pwm_pwm8, 0),
+ INGENIC_PIN_GROUP("pwm9", x2000_pwm_pwm9, 0),
+ INGENIC_PIN_GROUP("pwm10", x2000_pwm_pwm10, 0),
+ INGENIC_PIN_GROUP("pwm11", x2000_pwm_pwm11, 0),
+ INGENIC_PIN_GROUP("pwm12", x2000_pwm_pwm12, 0),
+ INGENIC_PIN_GROUP("pwm13", x2000_pwm_pwm13, 0),
+ INGENIC_PIN_GROUP("pwm14", x2000_pwm_pwm14, 0),
+ INGENIC_PIN_GROUP("pwm15", x2000_pwm_pwm15, 0),
+ INGENIC_PIN_GROUP("mac0-rmii", x2000_mac0_rmii, 1),
+ INGENIC_PIN_GROUP("mac0-rgmii", x2000_mac0_rgmii, 1),
+ INGENIC_PIN_GROUP("mac1-rmii", x2000_mac1_rmii, 3),
+ INGENIC_PIN_GROUP("mac1-rgmii", x2000_mac1_rgmii, 3),
+ INGENIC_PIN_GROUP("otg-vbus", x2000_otg, 0),
+};
+
+static const char *x2000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *x2000_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *x2000_uart2_groups[] = { "uart2-data", };
+static const char *x2000_uart3_groups[] = {
+ "uart3-data-c", "uart3-data-d", "uart3-hwflow-c", "uart3-hwflow-d",
+};
+static const char *x2000_uart4_groups[] = {
+ "uart4-data-a", "uart4-data-c", "uart4-hwflow-a", "uart4-hwflow-c",
+};
+static const char *x2000_uart5_groups[] = { "uart5-data-a", "uart5-data-c", };
+static const char *x2000_uart6_groups[] = { "uart6-data-a", "uart6-data-c", };
+static const char *x2000_uart7_groups[] = { "uart7-data-a", "uart7-data-c", };
+static const char *x2000_uart8_groups[] = { "uart8-data", };
+static const char *x2000_uart9_groups[] = { "uart9-data", };
+static const char *x2000_sfc_groups[] = { "sfc0-d", "sfc0-e", "sfc1", };
+static const char *x2000_ssi0_groups[] = {
+ "ssi0-dt-b", "ssi0-dt-d",
+ "ssi0-dr-b", "ssi0-dr-d",
+ "ssi0-clk-b", "ssi0-clk-d",
+ "ssi0-ce0-b", "ssi0-ce0-d",
+};
+static const char *x2000_ssi1_groups[] = {
+ "ssi1-dt-c", "ssi1-dt-d", "ssi1-dt-e",
+ "ssi1-dr-c", "ssi1-dr-d", "ssi1-dr-e",
+ "ssi1-clk-c", "ssi1-clk-d", "ssi1-clk-e",
+ "ssi1-ce0-c", "ssi1-ce0-d", "ssi1-ce0-e",
+};
+static const char *x2000_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", "mmc0-8bit", };
+static const char *x2000_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *x2000_mmc2_groups[] = { "mmc2-1bit", "mmc2-4bit", };
+static const char *x2000_emc_groups[] = {
+ "emc-8bit-data", "emc-16bit-data",
+ "emc-addr", "emc-rd-we", "emc-wait",
+};
+static const char *x2000_cs1_groups[] = { "emc-cs1", };
+static const char *x2000_cs2_groups[] = { "emc-cs2", };
+static const char *x2000_i2c0_groups[] = { "i2c0-data", };
+static const char *x2000_i2c1_groups[] = { "i2c1-data-c", "i2c1-data-d", };
+static const char *x2000_i2c2_groups[] = { "i2c2-data-b", "i2c2-data-d", "i2c2-data-e", };
+static const char *x2000_i2c3_groups[] = { "i2c3-data-a", "i2c3-data-d", };
+static const char *x2000_i2c4_groups[] = { "i2c4-data-c", "i2c4-data-d", };
+static const char *x2000_i2c5_groups[] = { "i2c5-data-c", "i2c5-data-d", };
+static const char *x2000_i2s1_groups[] = {
+ "i2s1-data-tx", "i2s1-data-rx",
+ "i2s1-clk-tx", "i2s1-clk-rx",
+ "i2s1-sysclk-tx", "i2s1-sysclk-rx",
+};
+static const char *x2000_i2s2_groups[] = {
+ "i2s2-data-rx0", "i2s2-data-rx1", "i2s2-data-rx2", "i2s2-data-rx3",
+ "i2s2-clk-rx", "i2s2-sysclk-rx",
+};
+static const char *x2000_i2s3_groups[] = {
+ "i2s3-data-tx0", "i2s3-data-tx1", "i2s3-data-tx2", "i2s3-data-tx3",
+ "i2s3-clk-tx", "i2s3-sysclk-tx",
+};
+static const char *x2000_dmic_groups[] = { "dmic0", "dmic1", "dmic2", "dmic3", };
+static const char *x2000_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
+static const char *x2000_lcd_groups[] = {
+ "lcd-tft-8bit", "lcd-tft-16bit", "lcd-tft-18bit", "lcd-tft-24bit",
+ "lcd-slcd-8bit", "lcd-slcd-16bit",
+};
+static const char *x2000_pwm0_groups[] = { "pwm0-c", "pwm0-d", };
+static const char *x2000_pwm1_groups[] = { "pwm1-c", "pwm1-d", };
+static const char *x2000_pwm2_groups[] = { "pwm2-c", "pwm2-e", };
+static const char *x2000_pwm3_groups[] = { "pwm3-c", "pwm3-e", };
+static const char *x2000_pwm4_groups[] = { "pwm4-c", "pwm4-e", };
+static const char *x2000_pwm5_groups[] = { "pwm5-c", "pwm5-e", };
+static const char *x2000_pwm6_groups[] = { "pwm6-c", "pwm6-e", };
+static const char *x2000_pwm7_groups[] = { "pwm7-c", "pwm7-e", };
+static const char *x2000_pwm8_groups[] = { "pwm8", };
+static const char *x2000_pwm9_groups[] = { "pwm9", };
+static const char *x2000_pwm10_groups[] = { "pwm10", };
+static const char *x2000_pwm11_groups[] = { "pwm11", };
+static const char *x2000_pwm12_groups[] = { "pwm12", };
+static const char *x2000_pwm13_groups[] = { "pwm13", };
+static const char *x2000_pwm14_groups[] = { "pwm14", };
+static const char *x2000_pwm15_groups[] = { "pwm15", };
+static const char *x2000_mac0_groups[] = { "mac0-rmii", "mac0-rgmii", };
+static const char *x2000_mac1_groups[] = { "mac1-rmii", "mac1-rgmii", };
+static const char *x2000_otg_groups[] = { "otg-vbus", };
+
+static const struct function_desc x2000_functions[] = {
+ { "uart0", x2000_uart0_groups, ARRAY_SIZE(x2000_uart0_groups), },
+ { "uart1", x2000_uart1_groups, ARRAY_SIZE(x2000_uart1_groups), },
+ { "uart2", x2000_uart2_groups, ARRAY_SIZE(x2000_uart2_groups), },
+ { "uart3", x2000_uart3_groups, ARRAY_SIZE(x2000_uart3_groups), },
+ { "uart4", x2000_uart4_groups, ARRAY_SIZE(x2000_uart4_groups), },
+ { "uart5", x2000_uart5_groups, ARRAY_SIZE(x2000_uart5_groups), },
+ { "uart6", x2000_uart6_groups, ARRAY_SIZE(x2000_uart6_groups), },
+ { "uart7", x2000_uart7_groups, ARRAY_SIZE(x2000_uart7_groups), },
+ { "uart8", x2000_uart8_groups, ARRAY_SIZE(x2000_uart8_groups), },
+ { "uart9", x2000_uart9_groups, ARRAY_SIZE(x2000_uart9_groups), },
+ { "sfc", x2000_sfc_groups, ARRAY_SIZE(x2000_sfc_groups), },
+ { "ssi0", x2000_ssi0_groups, ARRAY_SIZE(x2000_ssi0_groups), },
+ { "ssi1", x2000_ssi1_groups, ARRAY_SIZE(x2000_ssi1_groups), },
+ { "mmc0", x2000_mmc0_groups, ARRAY_SIZE(x2000_mmc0_groups), },
+ { "mmc1", x2000_mmc1_groups, ARRAY_SIZE(x2000_mmc1_groups), },
+ { "mmc2", x2000_mmc2_groups, ARRAY_SIZE(x2000_mmc2_groups), },
+ { "emc", x2000_emc_groups, ARRAY_SIZE(x2000_emc_groups), },
+ { "emc-cs1", x2000_cs1_groups, ARRAY_SIZE(x2000_cs1_groups), },
+ { "emc-cs2", x2000_cs2_groups, ARRAY_SIZE(x2000_cs2_groups), },
+ { "i2c0", x2000_i2c0_groups, ARRAY_SIZE(x2000_i2c0_groups), },
+ { "i2c1", x2000_i2c1_groups, ARRAY_SIZE(x2000_i2c1_groups), },
+ { "i2c2", x2000_i2c2_groups, ARRAY_SIZE(x2000_i2c2_groups), },
+ { "i2c3", x2000_i2c3_groups, ARRAY_SIZE(x2000_i2c3_groups), },
+ { "i2c4", x2000_i2c4_groups, ARRAY_SIZE(x2000_i2c4_groups), },
+ { "i2c5", x2000_i2c5_groups, ARRAY_SIZE(x2000_i2c5_groups), },
+ { "i2s1", x2000_i2s1_groups, ARRAY_SIZE(x2000_i2s1_groups), },
+ { "i2s2", x2000_i2s2_groups, ARRAY_SIZE(x2000_i2s2_groups), },
+ { "i2s3", x2000_i2s3_groups, ARRAY_SIZE(x2000_i2s3_groups), },
+ { "dmic", x2000_dmic_groups, ARRAY_SIZE(x2000_dmic_groups), },
+ { "cim", x2000_cim_groups, ARRAY_SIZE(x2000_cim_groups), },
+ { "lcd", x2000_lcd_groups, ARRAY_SIZE(x2000_lcd_groups), },
+ { "pwm0", x2000_pwm0_groups, ARRAY_SIZE(x2000_pwm0_groups), },
+ { "pwm1", x2000_pwm1_groups, ARRAY_SIZE(x2000_pwm1_groups), },
+ { "pwm2", x2000_pwm2_groups, ARRAY_SIZE(x2000_pwm2_groups), },
+ { "pwm3", x2000_pwm3_groups, ARRAY_SIZE(x2000_pwm3_groups), },
+ { "pwm4", x2000_pwm4_groups, ARRAY_SIZE(x2000_pwm4_groups), },
+ { "pwm5", x2000_pwm5_groups, ARRAY_SIZE(x2000_pwm5_groups), },
+ { "pwm6", x2000_pwm6_groups, ARRAY_SIZE(x2000_pwm6_groups), },
+ { "pwm7", x2000_pwm7_groups, ARRAY_SIZE(x2000_pwm7_groups), },
+ { "pwm8", x2000_pwm8_groups, ARRAY_SIZE(x2000_pwm8_groups), },
+ { "pwm9", x2000_pwm9_groups, ARRAY_SIZE(x2000_pwm9_groups), },
+ { "pwm10", x2000_pwm10_groups, ARRAY_SIZE(x2000_pwm10_groups), },
+ { "pwm11", x2000_pwm11_groups, ARRAY_SIZE(x2000_pwm11_groups), },
+ { "pwm12", x2000_pwm12_groups, ARRAY_SIZE(x2000_pwm12_groups), },
+ { "pwm13", x2000_pwm13_groups, ARRAY_SIZE(x2000_pwm13_groups), },
+ { "pwm14", x2000_pwm14_groups, ARRAY_SIZE(x2000_pwm14_groups), },
+ { "pwm15", x2000_pwm15_groups, ARRAY_SIZE(x2000_pwm15_groups), },
+ { "mac0", x2000_mac0_groups, ARRAY_SIZE(x2000_mac0_groups), },
+ { "mac1", x2000_mac1_groups, ARRAY_SIZE(x2000_mac1_groups), },
+ { "otg", x2000_otg_groups, ARRAY_SIZE(x2000_otg_groups), },
+};
+
+static const struct ingenic_chip_info x2000_chip_info = {
+ .num_chips = 5,
+ .reg_offset = 0x100,
+ .version = ID_X2000,
+ .groups = x2000_groups,
+ .num_groups = ARRAY_SIZE(x2000_groups),
+ .functions = x2000_functions,
+ .num_functions = ARRAY_SIZE(x2000_functions),
+ .pull_ups = x2000_pull_ups,
+ .pull_downs = x2000_pull_downs,
+};
+
static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -1668,6 +2813,12 @@ static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
static void ingenic_gpio_set_bit(struct ingenic_gpio_chip *jzgc,
u8 reg, u8 offset, bool set)
{
+ if (jzgc->jzpc->info->version == ID_JZ4730) {
+ regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg,
+ BIT(offset), set ? BIT(offset) : 0);
+ return;
+ }
+
if (set)
reg = REG_SET(reg);
else
@@ -1695,6 +2846,20 @@ static void ingenic_gpio_shadow_set_bit_load(struct ingenic_gpio_chip *jzgc)
jzgc->gc.base / PINS_PER_GPIO_CHIP);
}
+static void jz4730_gpio_set_bits(struct ingenic_gpio_chip *jzgc,
+ u8 reg_upper, u8 reg_lower, u8 offset, u8 value)
+{
+ /*
+ * The JZ4730 function and IRQ registers pack two configuration bits
+ * per pin, split across an upper/lower register pair of 16 pins each.
+ */
+ u8 reg = offset < JZ4730_PINS_PER_PAIRED_REG ? reg_lower : reg_upper;
+ unsigned int idx = offset % JZ4730_PINS_PER_PAIRED_REG;
+ unsigned int mask = GENMASK(1, 0) << (idx * 2);
+
+ regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg, mask, value << (idx * 2));
+}
+
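/*
 * Illustrative sketch (editor's aside, not part of the patch): the JZ4730
 * packs two configuration bits per pin, so a 32-pin port needs an
 * upper/lower register pair covering 16 pins each. A standalone check of
 * the arithmetic above, assuming JZ4730_PINS_PER_PAIRED_REG == 16:
 */
#include <stdio.h>

int main(void)
{
	unsigned int offset = 20;              /* pin 20 of the port */
	unsigned int per_reg = 16;             /* JZ4730_PINS_PER_PAIRED_REG */
	int use_upper = offset >= per_reg;     /* 1 -> reg_upper, 0 -> reg_lower */
	unsigned int idx = offset % per_reg;   /* field index within that register */
	unsigned int mask = 0x3u << (idx * 2); /* GENMASK(1, 0) << (idx * 2) */

	/* pin 20 -> upper register, field bits [9:8], mask 0x300 */
	printf("upper=%d idx=%u mask=0x%x\n", use_upper, idx, mask);
	return 0;
}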
static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
u8 offset)
{
@@ -1708,43 +2873,60 @@ static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
{
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_DATA, offset, !!value);
}
static void irq_set_type(struct ingenic_gpio_chip *jzgc,
u8 offset, unsigned int type)
{
u8 reg1, reg2;
- bool val1, val2;
+ bool val1, val2, val3;
switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val1 = val2 = false;
+ val3 = true;
+ break;
case IRQ_TYPE_EDGE_RISING:
val1 = val2 = true;
+ val3 = false;
break;
case IRQ_TYPE_EDGE_FALLING:
- val1 = false;
+ val1 = val3 = false;
val2 = true;
break;
case IRQ_TYPE_LEVEL_HIGH:
val1 = true;
- val2 = false;
+ val2 = val3 = false;
break;
case IRQ_TYPE_LEVEL_LOW:
default:
- val1 = val2 = false;
+ val1 = val2 = val3 = false;
break;
}
if (jzgc->jzpc->info->version >= ID_JZ4770) {
reg1 = JZ4770_GPIO_PAT1;
reg2 = JZ4770_GPIO_PAT0;
- } else {
+ } else if (jzgc->jzpc->info->version >= ID_JZ4740) {
reg1 = JZ4740_GPIO_TRIG;
reg2 = JZ4740_GPIO_DIR;
+ } else {
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPDIR, offset, false);
+ jz4730_gpio_set_bits(jzgc, JZ4730_GPIO_GPIDUR,
+ JZ4730_GPIO_GPIDLR, offset, (val2 << 1) | val1);
+ return;
}
- if (jzgc->jzpc->info->version >= ID_X1000) {
+ if (jzgc->jzpc->info->version >= ID_X2000) {
+ ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
+ ingenic_gpio_shadow_set_bit_load(jzgc);
+ ingenic_gpio_set_bit(jzgc, X2000_GPIO_EDG, offset, val3);
+ } else if (jzgc->jzpc->info->version >= ID_X1000) {
ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
ingenic_gpio_shadow_set_bit_load(jzgc);
@@ -1758,16 +2940,24 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
- ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
+ if (jzgc->jzpc->info->version >= ID_JZ4740)
+ ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, true);
}
static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
- ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
+ if (jzgc->jzpc->info->version >= ID_JZ4740)
+ ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, false);
}
static void ingenic_gpio_irq_enable(struct irq_data *irqd)
@@ -1778,8 +2968,10 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, true);
ingenic_gpio_irq_unmask(irqd);
}
@@ -1794,8 +2986,10 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
@@ -1805,7 +2999,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
int irq = irqd->hwirq;
bool high;
- if (irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) {
+ if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
+ (jzgc->jzpc->info->version < ID_X2000)) {
/*
* Switch to an interrupt for the opposite edge to the one that
* triggered the interrupt being ACKed.
@@ -1819,8 +3014,10 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
if (jzgc->jzpc->info->version >= ID_JZ4770)
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
+ else
+ ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPFR, irq, false);
}
static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -1842,7 +3039,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
irq_set_handler_locked(irqd, handle_bad_irq);
}
- if (type == IRQ_TYPE_EDGE_BOTH) {
+ if ((type == IRQ_TYPE_EDGE_BOTH) && (jzgc->jzpc->info->version < ID_X2000)) {
/*
* The hardware does not support interrupts on both edges. The
* best we can do is to set up a single-edge interrupt and then
@@ -1876,8 +3073,10 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
if (jzgc->jzpc->info->version >= ID_JZ4770)
flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
- else
+ else if (jzgc->jzpc->info->version >= ID_JZ4740)
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
+ else
+ flag = ingenic_gpio_read_reg(jzgc, JZ4730_GPIO_GPFR);
for_each_set_bit(i, &flag, 32)
generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
@@ -1913,13 +3112,26 @@ static int ingenic_gpio_direction_output(struct gpio_chip *gc,
}
static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
- unsigned int pin, u8 reg, bool set)
+ unsigned int pin, unsigned int reg, bool set)
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
- (set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
+ if (set) {
+ if (jzpc->info->version >= ID_JZ4740)
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(reg), BIT(idx));
+ else
+ regmap_set_bits(jzpc->map, offt * jzpc->info->reg_offset +
+ reg, BIT(idx));
+ } else {
+ if (jzpc->info->version >= ID_JZ4740)
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(reg), BIT(idx));
+ else
+ regmap_clear_bits(jzpc->map, offt * jzpc->info->reg_offset +
+ reg, BIT(idx));
+ }
}
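/*
 * Worked example (editor's aside, not part of the patch): JZ4740 and later
 * parts expose write-one-to-set / write-one-to-clear mirrors of each
 * register via REG_SET()/REG_CLEAR(), so a single bus write flips a bit;
 * the JZ4730 has plain registers only, hence the regmap_set_bits()/
 * regmap_clear_bits() read-modify-write fallback above. The bank
 * arithmetic, with a hypothetical register offset 0x10, the X2000's 0x100
 * bank stride, and assuming this file's REG_SET(x) == (x) + 0x4 and
 * REG_CLEAR(x) == (x) + 0x8 convention:
 */
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x10;               /* hypothetical register */
	unsigned int pin = 40;                 /* global pin number */
	unsigned int idx = pin % 32;           /* PINS_PER_GPIO_CHIP */
	unsigned int off = (pin / 32) * 0x100; /* bank base (x2000 reg_offset) */

	printf("set:   write BIT(%u) to 0x%x\n", idx, off + reg + 0x4);
	printf("clear: write BIT(%u) to 0x%x\n", idx, off + reg + 0x8);
	return 0; /* pin 40 -> bank 1: set at 0x114, clear at 0x118 */
}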
static inline void ingenic_shadow_config_pin(struct ingenic_pinctrl *jzpc,
@@ -1938,8 +3150,24 @@ static inline void ingenic_shadow_config_pin_load(struct ingenic_pinctrl *jzpc,
pin / PINS_PER_GPIO_CHIP);
}
+static inline void jz4730_config_pin_function(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, u8 reg_upper, u8 reg_lower, u8 value)
+{
+ /*
+ * The JZ4730 function and IRQ registers pack two configuration bits
+ * per pin, split across an upper/lower register pair of 16 pins each.
+ */
+ unsigned int idx = pin % JZ4730_PINS_PER_PAIRED_REG;
+ unsigned int mask = GENMASK(1, 0) << (idx * 2);
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ u8 reg = (pin % PINS_PER_GPIO_CHIP) < JZ4730_PINS_PER_PAIRED_REG ? reg_lower : reg_upper;
+
+ regmap_update_bits(jzpc->map, offt * jzpc->info->reg_offset + reg,
+ mask, value << (idx * 2));
+}
+
static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
- unsigned int pin, u8 reg)
+ unsigned int pin, unsigned int reg)
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
@@ -1961,6 +3189,10 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
+ } else if (jzpc->info->version == ID_JZ4730) {
+ if (!ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPDIR))
+ return GPIO_LINE_DIRECTION_IN;
+ return GPIO_LINE_DIRECTION_OUT;
}
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
@@ -2019,10 +3251,13 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
- } else {
+ } else if (jzpc->info->version >= ID_JZ4740) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPIER, false);
+ jz4730_config_pin_function(jzpc, pin, JZ4730_GPIO_GPAUR, JZ4730_GPIO_GPALR, func);
}
return 0;
@@ -2083,10 +3318,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
- } else {
+ } else if (jzpc->info->version >= ID_JZ4740) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPIER, false);
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPDIR, !input);
+ jz4730_config_pin_function(jzpc, pin, JZ4730_GPIO_GPAUR, JZ4730_GPIO_GPALR, 0);
}
return 0;
@@ -2107,44 +3346,120 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
enum pin_config_param param = pinconf_to_config_param(*config);
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- bool pull;
+ unsigned int arg = 1;
+ unsigned int bias, reg;
+ bool pull, pullup, pulldown;
+
+ if (jzpc->info->version >= ID_X2000) {
+ pullup = ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) &&
+ !ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPD) &&
+ (jzpc->info->pull_ups[offt] & BIT(idx));
+ pulldown = ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPD) &&
+ !ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) &&
+ (jzpc->info->pull_downs[offt] & BIT(idx));
+
+ } else if (jzpc->info->version >= ID_X1830) {
+ unsigned int half = PINS_PER_GPIO_CHIP / 2;
+ unsigned int idxh = (pin % half) * 2;
- if (jzpc->info->version >= ID_JZ4770)
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
- else
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
+ if (idx < half)
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
+ X1830_GPIO_PEL, &bias);
+ else
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
+ X1830_GPIO_PEH, &bias);
+
+ bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
+
+ pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
+ pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
+
+ } else {
+ if (jzpc->info->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
+ else if (jzpc->info->version >= ID_JZ4740)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
+ else
+ pull = ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPPUR);
+
+ pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
+ pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
+ }
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (pull)
+ if (pullup || pulldown)
return -EINVAL;
+
break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
+ if (!pullup)
return -EINVAL;
+
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
+ if (!pulldown)
return -EINVAL;
+
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (jzpc->info->version >= ID_X2000)
+ reg = X2000_GPIO_SMT;
+ else if (jzpc->info->version >= ID_X1830)
+ reg = X1830_GPIO_SMT;
+ else
+ return -EINVAL;
+
+ arg = !!ingenic_get_pin_config(jzpc, pin, reg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (jzpc->info->version >= ID_X2000)
+ reg = X2000_GPIO_SR;
+ else if (jzpc->info->version >= ID_X1830)
+ reg = X1830_GPIO_SR;
+ else
+ return -EINVAL;
+
+ arg = !!ingenic_get_pin_config(jzpc, pin, reg);
break;
default:
return -ENOTSUPP;
}
- *config = pinconf_to_config_packed(param, 1);
+ *config = pinconf_to_config_packed(param, arg);
return 0;
}
static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
unsigned int pin, unsigned int bias)
{
- if (jzpc->info->version >= ID_X1830) {
+ if (jzpc->info->version >= ID_X2000) {
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, true);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, true);
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ default:
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPU, false);
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false);
+ }
+
+ } else if (jzpc->info->version >= ID_X1830) {
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int half = PINS_PER_GPIO_CHIP / 2;
- unsigned int idxh = pin % half * 2;
+ unsigned int idxh = (pin % half) * 2;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
if (idx < half) {
@@ -2161,18 +3476,40 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
} else if (jzpc->info->version >= ID_JZ4770) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
- } else {
+ } else if (jzpc->info->version >= ID_JZ4740) {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPPUR, bias);
}
}
+static void ingenic_set_schmitt_trigger(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool enable)
+{
+ if (jzpc->info->version >= ID_X2000)
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_SMT, enable);
+ else
+ ingenic_config_pin(jzpc, pin, X1830_GPIO_SMT, enable);
+}
+
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
if (jzpc->info->version >= ID_JZ4770)
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
- else
+ else if (jzpc->info->version >= ID_JZ4740)
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4730_GPIO_DATA, high);
+}
+
+static void ingenic_set_slew_rate(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, unsigned int slew)
+{
+ if (jzpc->info->version >= ID_X2000)
+ ingenic_config_pin(jzpc, pin, X2000_GPIO_SR, slew);
+ else
+ ingenic_config_pin(jzpc, pin, X1830_GPIO_SR, slew);
}
static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
@@ -2189,7 +3526,9 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_SLEW_RATE:
continue;
default:
return -ENOTSUPP;
@@ -2222,6 +3561,13 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_bias(jzpc, pin, GPIO_PULL_DOWN);
break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (jzpc->info->version < ID_X1830)
+ return -EINVAL;
+
+ ingenic_set_schmitt_trigger(jzpc, pin, arg);
+ break;
+
case PIN_CONFIG_OUTPUT:
ret = pinctrl_gpio_direction_output(pin);
if (ret)
@@ -2230,6 +3576,13 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_output_level(jzpc, pin, arg);
break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (jzpc->info->version < ID_X1830)
+ return -EINVAL;
+
+ ingenic_set_slew_rate(jzpc, pin, arg);
+ break;
+
default:
/* unreachable */
break;
@@ -2301,13 +3654,18 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
};
static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
+ { .compatible = "ingenic,jz4730-gpio", },
{ .compatible = "ingenic,jz4740-gpio", },
{ .compatible = "ingenic,jz4725b-gpio", },
+ { .compatible = "ingenic,jz4750-gpio", },
+ { .compatible = "ingenic,jz4755-gpio", },
{ .compatible = "ingenic,jz4760-gpio", },
{ .compatible = "ingenic,jz4770-gpio", },
+ { .compatible = "ingenic,jz4775-gpio", },
{ .compatible = "ingenic,jz4780-gpio", },
{ .compatible = "ingenic,x1000-gpio", },
{ .compatible = "ingenic,x1830-gpio", },
+ { .compatible = "ingenic,x2000-gpio", },
{},
};
@@ -2380,6 +3738,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
+
girq->parents[0] = jzgc->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
@@ -2485,8 +3844,10 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
for_each_child_of_node(dev->of_node, node) {
if (of_match_node(ingenic_gpio_of_match, node)) {
err = ingenic_gpio_probe(jzpc, node);
- if (err)
+ if (err) {
+ of_node_put(node);
return err;
+ }
}
}
@@ -2495,6 +3856,10 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
static const struct of_device_id ingenic_pinctrl_of_match[] = {
{
+ .compatible = "ingenic,jz4730-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4730, &jz4730_chip_info)
+ },
+ {
.compatible = "ingenic,jz4740-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4740, &jz4740_chip_info)
},
@@ -2503,6 +3868,14 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.data = IF_ENABLED(CONFIG_MACH_JZ4725B, &jz4725b_chip_info)
},
{
+ .compatible = "ingenic,jz4750-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4750, &jz4750_chip_info)
+ },
+ {
+ .compatible = "ingenic,jz4755-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4755, &jz4755_chip_info)
+ },
+ {
.compatible = "ingenic,jz4760-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4760, &jz4760_chip_info)
},
@@ -2515,6 +3888,10 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.data = IF_ENABLED(CONFIG_MACH_JZ4770, &jz4770_chip_info)
},
{
+ .compatible = "ingenic,jz4775-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_JZ4775, &jz4775_chip_info)
+ },
+ {
.compatible = "ingenic,jz4780-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_JZ4780, &jz4780_chip_info)
},
@@ -2534,6 +3911,14 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
.compatible = "ingenic,x1830-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_X1830, &x1830_chip_info)
},
+ {
+ .compatible = "ingenic,x2000-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X2000, &x2000_chip_info)
+ },
+ {
+ .compatible = "ingenic,x2000e-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X2000, &x2000_chip_info)
+ },
{ /* sentinel */ },
};
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 8a733cf77ba0..f831526d06ff 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -15,7 +15,6 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/io.h>
#include <dt-bindings/pinctrl/k210-fpioa.h>
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 7b2f885e68bd..ed9bf2c89998 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -646,7 +646,7 @@ static const struct pin_config_item lpc18xx_conf_items[ARRAY_SIZE(lpc18xx_params
static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
{
switch (param) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (reg & LPC18XX_SCU_USB1_EPWR)
*arg = 0;
else
@@ -904,7 +904,7 @@ static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
u32 param_val, u32 *reg)
{
switch (param) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (param_val)
*reg &= ~LPC18XX_SCU_USB1_EPWR;
else
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 53a0badc6b03..067fc4208de4 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -16,10 +16,12 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/machine.h>
@@ -61,8 +63,17 @@ enum rockchip_pinctrl_type {
RK3308,
RK3368,
RK3399,
+ RK3568,
};
+
+/*
+ * Generate a hiword-mask value: the value (v) placed in bits [h:l], with
+ * the matching write-enable bits set in the register's 31:16 area.
+ */
+#define WRITE_MASK_VAL(h, l, v) \
+ (GENMASK(((h) + 16), ((l) + 16)) | (((v) << (l)) & GENMASK((h), (l))))
+
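/*
 * Worked expansion (editor's aside, not part of the patch): the macro puts
 * the value in bits [h:l] and raises the matching write-enable bits 16
 * positions higher, so a hiword-masked GRF write updates only that field.
 * A standalone check using a local GENMASK equivalent:
 */
#include <stdio.h>

#define MY_GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))
#define MY_WRITE_MASK_VAL(h, l, v) \
	(MY_GENMASK((h) + 16, (l) + 16) | (((v) << (l)) & MY_GENMASK(h, l)))

int main(void)
{
	/* field [1:0] = 1 -> 0x00030001; field [5:4] = 2 -> 0x00300020 */
	printf("0x%08x\n", MY_WRITE_MASK_VAL(1, 0, 1));
	printf("0x%08x\n", MY_WRITE_MASK_VAL(5, 4, 2));
	return 0;
}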
/*
* Encode variants of iomux registers into a type variable
*/
@@ -290,6 +301,25 @@ struct rockchip_pin_bank {
.pull_type[3] = pull3, \
}
+#define PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, FLAG) \
+ { \
+ .bank_num = ID, \
+ .pin = PIN, \
+ .func = FUNC, \
+ .route_offset = REG, \
+ .route_val = VAL, \
+ .route_location = FLAG, \
+ }
+
+#define RK_MUXROUTE_SAME(ID, PIN, FUNC, REG, VAL) \
+ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_SAME)
+
+#define RK_MUXROUTE_GRF(ID, PIN, FUNC, REG, VAL) \
+ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_GRF)
+
+#define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL) \
+ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU)
+
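/*
 * Editor's aside, not part of the patch: the RK_Pxy macros used below come
 * from <dt-bindings/pinctrl/rockchip.h> and encode bank-relative pin
 * numbers (RK_PA0..RK_PD7 == 0..31), so e.g. the old px30 "cif-d2m1"
 * entry { .bank_num = 3, .pin = 3, ... } becomes
 * RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)).
 * A sketch of that encoding:
 */
#include <stdio.h>

#define RK_PIN(port, idx) ((((port) - 'A') * 8) + (idx))

int main(void)
{
	printf("RK_PA3=%d RK_PC6=%d RK_PD7=%d\n",
	       RK_PIN('A', 3), RK_PIN('C', 6), RK_PIN('D', 7)); /* 3 22 31 */
	return 0;
}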
/**
* struct rockchip_mux_recalced_data: represent a pin iomux data.
* @num: bank number.
@@ -801,597 +831,203 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
}
static struct rockchip_mux_route_data px30_mux_route_data[] = {
- {
- /* cif-d2m0 */
- .bank_num = 2,
- .pin = 0,
- .func = 1,
- .route_offset = 0x184,
- .route_val = BIT(16 + 7),
- }, {
- /* cif-d2m1 */
- .bank_num = 3,
- .pin = 3,
- .func = 3,
- .route_offset = 0x184,
- .route_val = BIT(16 + 7) | BIT(7),
- }, {
- /* pdm-m0 */
- .bank_num = 3,
- .pin = 22,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 8),
- }, {
- /* pdm-m1 */
- .bank_num = 2,
- .pin = 22,
- .func = 1,
- .route_offset = 0x184,
- .route_val = BIT(16 + 8) | BIT(8),
- }, {
- /* uart2-rxm0 */
- .bank_num = 1,
- .pin = 27,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 10),
- }, {
- /* uart2-rxm1 */
- .bank_num = 2,
- .pin = 14,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 10) | BIT(10),
- }, {
- /* uart3-rxm0 */
- .bank_num = 0,
- .pin = 17,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 9),
- }, {
- /* uart3-rxm1 */
- .bank_num = 1,
- .pin = 15,
- .func = 2,
- .route_offset = 0x184,
- .route_val = BIT(16 + 9) | BIT(9),
- },
+ RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */
+ RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */
+ RK_MUXROUTE_SAME(3, RK_PC6, 2, 0x184, BIT(16 + 8)), /* pdm-m0 */
+ RK_MUXROUTE_SAME(2, RK_PC6, 1, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-m1 */
+ RK_MUXROUTE_SAME(1, RK_PD3, 2, 0x184, BIT(16 + 10)), /* uart2-rxm0 */
+ RK_MUXROUTE_SAME(2, RK_PB6, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-rxm1 */
+ RK_MUXROUTE_SAME(0, RK_PC1, 2, 0x184, BIT(16 + 9)), /* uart3-rxm0 */
+ RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */
};
static struct rockchip_mux_route_data rk3128_mux_route_data[] = {
- {
- /* spi-0 */
- .bank_num = 1,
- .pin = 10,
- .func = 1,
- .route_offset = 0x144,
- .route_val = BIT(16 + 3) | BIT(16 + 4),
- }, {
- /* spi-1 */
- .bank_num = 1,
- .pin = 27,
- .func = 3,
- .route_offset = 0x144,
- .route_val = BIT(16 + 3) | BIT(16 + 4) | BIT(3),
- }, {
- /* spi-2 */
- .bank_num = 0,
- .pin = 13,
- .func = 2,
- .route_offset = 0x144,
- .route_val = BIT(16 + 3) | BIT(16 + 4) | BIT(4),
- }, {
- /* i2s-0 */
- .bank_num = 1,
- .pin = 5,
- .func = 1,
- .route_offset = 0x144,
- .route_val = BIT(16 + 5),
- }, {
- /* i2s-1 */
- .bank_num = 0,
- .pin = 14,
- .func = 1,
- .route_offset = 0x144,
- .route_val = BIT(16 + 5) | BIT(5),
- }, {
- /* emmc-0 */
- .bank_num = 1,
- .pin = 22,
- .func = 2,
- .route_offset = 0x144,
- .route_val = BIT(16 + 6),
- }, {
- /* emmc-1 */
- .bank_num = 2,
- .pin = 4,
- .func = 2,
- .route_offset = 0x144,
- .route_val = BIT(16 + 6) | BIT(6),
- },
+ RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x144, BIT(16 + 3) | BIT(16 + 4)), /* spi-0 */
+ RK_MUXROUTE_SAME(1, RK_PD3, 3, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(3)), /* spi-1 */
+ RK_MUXROUTE_SAME(0, RK_PB5, 2, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(4)), /* spi-2 */
+ RK_MUXROUTE_SAME(1, RK_PA5, 1, 0x144, BIT(16 + 5)), /* i2s-0 */
+ RK_MUXROUTE_SAME(0, RK_PB6, 1, 0x144, BIT(16 + 5) | BIT(5)), /* i2s-1 */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x144, BIT(16 + 6)), /* emmc-0 */
+ RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x144, BIT(16 + 6) | BIT(6)), /* emmc-1 */
};
static struct rockchip_mux_route_data rk3188_mux_route_data[] = {
- {
- /* non-iomuxed emmc/flash pins on flash-dqs */
- .bank_num = 0,
- .pin = 24,
- .func = 1,
- .route_location = ROCKCHIP_ROUTE_GRF,
- .route_offset = 0xa0,
- .route_val = BIT(16 + 11),
- }, {
- /* non-iomuxed emmc/flash pins on emmc-clk */
- .bank_num = 0,
- .pin = 24,
- .func = 2,
- .route_location = ROCKCHIP_ROUTE_GRF,
- .route_offset = 0xa0,
- .route_val = BIT(16 + 11) | BIT(11),
- },
+ RK_MUXROUTE_SAME(0, RK_PD0, 1, 0xa0, BIT(16 + 11)), /* non-iomuxed emmc/flash pins on flash-dqs */
+ RK_MUXROUTE_SAME(0, RK_PD0, 2, 0xa0, BIT(16 + 11) | BIT(11)), /* non-iomuxed emmc/flash pins on emmc-clk */
};
static struct rockchip_mux_route_data rk3228_mux_route_data[] = {
- {
- /* pwm0-0 */
- .bank_num = 0,
- .pin = 26,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16),
- }, {
- /* pwm0-1 */
- .bank_num = 3,
- .pin = 21,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16) | BIT(0),
- }, {
- /* pwm1-0 */
- .bank_num = 0,
- .pin = 27,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 1),
- }, {
- /* pwm1-1 */
- .bank_num = 0,
- .pin = 30,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 1) | BIT(1),
- }, {
- /* pwm2-0 */
- .bank_num = 0,
- .pin = 28,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 2),
- }, {
- /* pwm2-1 */
- .bank_num = 1,
- .pin = 12,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(2),
- }, {
- /* pwm3-0 */
- .bank_num = 3,
- .pin = 26,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3),
- }, {
- /* pwm3-1 */
- .bank_num = 1,
- .pin = 11,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* sdio-0_d0 */
- .bank_num = 1,
- .pin = 1,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 4),
- }, {
- /* sdio-1_d0 */
- .bank_num = 3,
- .pin = 2,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 4) | BIT(4),
- }, {
- /* spi-0_rx */
- .bank_num = 0,
- .pin = 13,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 5),
- }, {
- /* spi-1_rx */
- .bank_num = 2,
- .pin = 0,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 5) | BIT(5),
- }, {
- /* emmc-0_cmd */
- .bank_num = 1,
- .pin = 22,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 7),
- }, {
- /* emmc-1_cmd */
- .bank_num = 2,
- .pin = 4,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 7) | BIT(7),
- }, {
- /* uart2-0_rx */
- .bank_num = 1,
- .pin = 19,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 8),
- }, {
- /* uart2-1_rx */
- .bank_num = 1,
- .pin = 10,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 8) | BIT(8),
- }, {
- /* uart1-0_rx */
- .bank_num = 1,
- .pin = 10,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 11),
- }, {
- /* uart1-1_rx */
- .bank_num = 3,
- .pin = 13,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 11) | BIT(11),
- },
+ RK_MUXROUTE_SAME(0, RK_PD2, 1, 0x50, BIT(16)), /* pwm0-0 */
+ RK_MUXROUTE_SAME(3, RK_PC5, 1, 0x50, BIT(16) | BIT(0)), /* pwm0-1 */
+ RK_MUXROUTE_SAME(0, RK_PD3, 1, 0x50, BIT(16 + 1)), /* pwm1-0 */
+ RK_MUXROUTE_SAME(0, RK_PD6, 2, 0x50, BIT(16 + 1) | BIT(1)), /* pwm1-1 */
+ RK_MUXROUTE_SAME(0, RK_PD4, 1, 0x50, BIT(16 + 2)), /* pwm2-0 */
+ RK_MUXROUTE_SAME(1, RK_PB4, 2, 0x50, BIT(16 + 2) | BIT(2)), /* pwm2-1 */
+ RK_MUXROUTE_SAME(3, RK_PD2, 1, 0x50, BIT(16 + 3)), /* pwm3-0 */
+ RK_MUXROUTE_SAME(1, RK_PB3, 2, 0x50, BIT(16 + 3) | BIT(3)), /* pwm3-1 */
+ RK_MUXROUTE_SAME(1, RK_PA1, 1, 0x50, BIT(16 + 4)), /* sdio-0_d0 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 1, 0x50, BIT(16 + 4) | BIT(4)), /* sdio-1_d0 */
+ RK_MUXROUTE_SAME(0, RK_PB5, 2, 0x50, BIT(16 + 5)), /* spi-0_rx */
+ RK_MUXROUTE_SAME(2, RK_PA0, 2, 0x50, BIT(16 + 5) | BIT(5)), /* spi-1_rx */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x50, BIT(16 + 7)), /* emmc-0_cmd */
+ RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x50, BIT(16 + 7) | BIT(7)), /* emmc-1_cmd */
+ RK_MUXROUTE_SAME(1, RK_PC3, 2, 0x50, BIT(16 + 8)), /* uart2-0_rx */
+ RK_MUXROUTE_SAME(1, RK_PB2, 2, 0x50, BIT(16 + 8) | BIT(8)), /* uart2-1_rx */
+ RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x50, BIT(16 + 11)), /* uart1-0_rx */
+ RK_MUXROUTE_SAME(3, RK_PB5, 1, 0x50, BIT(16 + 11) | BIT(11)), /* uart1-1_rx */
};
static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
- {
- /* edphdmi_cecinoutt1 */
- .bank_num = 7,
- .pin = 16,
- .func = 2,
- .route_offset = 0x264,
- .route_val = BIT(16 + 12) | BIT(12),
- }, {
- /* edphdmi_cecinout */
- .bank_num = 7,
- .pin = 23,
- .func = 4,
- .route_offset = 0x264,
- .route_val = BIT(16 + 12),
- },
+ RK_MUXROUTE_SAME(7, RK_PC0, 2, 0x264, BIT(16 + 12) | BIT(12)), /* edphdmi_cecinoutt1 */
+ RK_MUXROUTE_SAME(7, RK_PC7, 4, 0x264, BIT(16 + 12)), /* edphdmi_cecinout */
};
static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
- {
- /* rtc_clk */
- .bank_num = 0,
- .pin = 19,
- .func = 1,
- .route_offset = 0x314,
- .route_val = BIT(16 + 0) | BIT(0),
- }, {
- /* uart2_rxm0 */
- .bank_num = 1,
- .pin = 22,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 2) | BIT(16 + 3),
- }, {
- /* uart2_rxm1 */
- .bank_num = 4,
- .pin = 26,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2),
- }, {
- /* i2c3_sdam0 */
- .bank_num = 0,
- .pin = 15,
- .func = 2,
- .route_offset = 0x608,
- .route_val = BIT(16 + 8) | BIT(16 + 9),
- }, {
- /* i2c3_sdam1 */
- .bank_num = 3,
- .pin = 12,
- .func = 2,
- .route_offset = 0x608,
- .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8),
- }, {
- /* i2c3_sdam2 */
- .bank_num = 2,
- .pin = 0,
- .func = 3,
- .route_offset = 0x608,
- .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9),
- }, {
- /* i2s-8ch-1-sclktxm0 */
- .bank_num = 1,
- .pin = 3,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3),
- }, {
- /* i2s-8ch-1-sclkrxm0 */
- .bank_num = 1,
- .pin = 4,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3),
- }, {
- /* i2s-8ch-1-sclktxm1 */
- .bank_num = 1,
- .pin = 13,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* i2s-8ch-1-sclkrxm1 */
- .bank_num = 1,
- .pin = 14,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* pdm-clkm0 */
- .bank_num = 1,
- .pin = 4,
- .func = 3,
- .route_offset = 0x308,
- .route_val = BIT(16 + 12) | BIT(16 + 13),
- }, {
- /* pdm-clkm1 */
- .bank_num = 1,
- .pin = 14,
- .func = 4,
- .route_offset = 0x308,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
- }, {
- /* pdm-clkm2 */
- .bank_num = 2,
- .pin = 6,
- .func = 2,
- .route_offset = 0x308,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
- }, {
- /* pdm-clkm-m2 */
- .bank_num = 2,
- .pin = 4,
- .func = 3,
- .route_offset = 0x600,
- .route_val = BIT(16 + 2) | BIT(2),
- }, {
- /* spi1_miso */
- .bank_num = 3,
- .pin = 10,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 9),
- }, {
- /* spi1_miso_m1 */
- .bank_num = 2,
- .pin = 4,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 9) | BIT(9),
- }, {
- /* owire_m0 */
- .bank_num = 0,
- .pin = 11,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 10) | BIT(16 + 11),
- }, {
- /* owire_m1 */
- .bank_num = 1,
- .pin = 22,
- .func = 7,
- .route_offset = 0x314,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
- }, {
- /* owire_m2 */
- .bank_num = 2,
- .pin = 2,
- .func = 5,
- .route_offset = 0x314,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
- }, {
- /* can_rxd_m0 */
- .bank_num = 0,
- .pin = 11,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 12) | BIT(16 + 13),
- }, {
- /* can_rxd_m1 */
- .bank_num = 1,
- .pin = 22,
- .func = 5,
- .route_offset = 0x314,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
- }, {
- /* can_rxd_m2 */
- .bank_num = 2,
- .pin = 2,
- .func = 4,
- .route_offset = 0x314,
- .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
- }, {
- /* mac_rxd0_m0 */
- .bank_num = 1,
- .pin = 20,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 14),
- }, {
- /* mac_rxd0_m1 */
- .bank_num = 4,
- .pin = 2,
- .func = 2,
- .route_offset = 0x314,
- .route_val = BIT(16 + 14) | BIT(14),
- }, {
- /* uart3_rx */
- .bank_num = 3,
- .pin = 12,
- .func = 4,
- .route_offset = 0x314,
- .route_val = BIT(16 + 15),
- }, {
- /* uart3_rx_m1 */
- .bank_num = 0,
- .pin = 17,
- .func = 3,
- .route_offset = 0x314,
- .route_val = BIT(16 + 15) | BIT(15),
- },
+ RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */
+ RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */
+ RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */
+ RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* i2c3_sdam1 */
+ RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */
+ RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */
+ RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */
+ RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclkrxm1 */
+ RK_MUXROUTE_SAME(1, RK_PA4, 3, 0x308, BIT(16 + 12) | BIT(16 + 13)), /* pdm-clkm0 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */
+ RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */
+ RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */
+ RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */
+ RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */
+ RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */
+ RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */
+ RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */
+ RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */
+ RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */
+ RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */
+ RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */
+ RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */
+ RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */
+ RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */
};
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
- {
- /* uart2dbg_rxm0 */
- .bank_num = 1,
- .pin = 1,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16) | BIT(16 + 1),
- }, {
- /* uart2dbg_rxm1 */
- .bank_num = 2,
- .pin = 1,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16) | BIT(16 + 1) | BIT(0),
- }, {
- /* gmac-m1_rxd0 */
- .bank_num = 1,
- .pin = 11,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(2),
- }, {
- /* gmac-m1-optimized_rxd3 */
- .bank_num = 1,
- .pin = 14,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 10) | BIT(10),
- }, {
- /* pdm_sdi0m0 */
- .bank_num = 2,
- .pin = 19,
- .func = 2,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3),
- }, {
- /* pdm_sdi0m1 */
- .bank_num = 1,
- .pin = 23,
- .func = 3,
- .route_offset = 0x50,
- .route_val = BIT(16 + 3) | BIT(3),
- }, {
- /* spi_rxdm2 */
- .bank_num = 3,
- .pin = 2,
- .func = 4,
- .route_offset = 0x50,
- .route_val = BIT(16 + 4) | BIT(16 + 5) | BIT(5),
- }, {
- /* i2s2_sdim0 */
- .bank_num = 1,
- .pin = 24,
- .func = 1,
- .route_offset = 0x50,
- .route_val = BIT(16 + 6),
- }, {
- /* i2s2_sdim1 */
- .bank_num = 3,
- .pin = 2,
- .func = 6,
- .route_offset = 0x50,
- .route_val = BIT(16 + 6) | BIT(6),
- }, {
- /* card_iom1 */
- .bank_num = 2,
- .pin = 22,
- .func = 3,
- .route_offset = 0x50,
- .route_val = BIT(16 + 7) | BIT(7),
- }, {
- /* tsp_d5m1 */
- .bank_num = 2,
- .pin = 16,
- .func = 3,
- .route_offset = 0x50,
- .route_val = BIT(16 + 8) | BIT(8),
- }, {
- /* cif_data5m1 */
- .bank_num = 2,
- .pin = 16,
- .func = 4,
- .route_offset = 0x50,
- .route_val = BIT(16 + 9) | BIT(9),
- },
+ RK_MUXROUTE_SAME(1, RK_PA1, 2, 0x50, BIT(16) | BIT(16 + 1)), /* uart2dbg_rxm0 */
+ RK_MUXROUTE_SAME(2, RK_PA1, 1, 0x50, BIT(16) | BIT(16 + 1) | BIT(0)), /* uart2dbg_rxm1 */
+ RK_MUXROUTE_SAME(1, RK_PB3, 2, 0x50, BIT(16 + 2) | BIT(2)), /* gmac-m1_rxd0 */
+ RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x50, BIT(16 + 10) | BIT(10)), /* gmac-m1-optimized_rxd3 */
+ RK_MUXROUTE_SAME(2, RK_PC3, 2, 0x50, BIT(16 + 3)), /* pdm_sdi0m0 */
+ RK_MUXROUTE_SAME(1, RK_PC7, 3, 0x50, BIT(16 + 3) | BIT(3)), /* pdm_sdi0m1 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 4, 0x50, BIT(16 + 4) | BIT(16 + 5) | BIT(5)), /* spi_rxdm2 */
+ RK_MUXROUTE_SAME(1, RK_PD0, 1, 0x50, BIT(16 + 6)), /* i2s2_sdim0 */
+ RK_MUXROUTE_SAME(3, RK_PA2, 6, 0x50, BIT(16 + 6) | BIT(6)), /* i2s2_sdim1 */
+ RK_MUXROUTE_SAME(2, RK_PC6, 3, 0x50, BIT(16 + 7) | BIT(7)), /* card_iom1 */
+ RK_MUXROUTE_SAME(2, RK_PC0, 3, 0x50, BIT(16 + 8) | BIT(8)), /* tsp_d5m1 */
+ RK_MUXROUTE_SAME(2, RK_PC0, 4, 0x50, BIT(16 + 9) | BIT(9)), /* cif_data5m1 */
};
static struct rockchip_mux_route_data rk3399_mux_route_data[] = {
- {
- /* uart2dbga_rx */
- .bank_num = 4,
- .pin = 8,
- .func = 2,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 10) | BIT(16 + 11),
- }, {
- /* uart2dbgb_rx */
- .bank_num = 4,
- .pin = 16,
- .func = 2,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
- }, {
- /* uart2dbgc_rx */
- .bank_num = 4,
- .pin = 19,
- .func = 1,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
- }, {
- /* pcie_clkreqn */
- .bank_num = 2,
- .pin = 26,
- .func = 2,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 14),
- }, {
- /* pcie_clkreqnb */
- .bank_num = 4,
- .pin = 24,
- .func = 1,
- .route_offset = 0xe21c,
- .route_val = BIT(16 + 14) | BIT(14),
- },
+ RK_MUXROUTE_SAME(4, RK_PB0, 2, 0xe21c, BIT(16 + 10) | BIT(16 + 11)), /* uart2dbga_rx */
+ RK_MUXROUTE_SAME(4, RK_PC0, 2, 0xe21c, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* uart2dbgb_rx */
+ RK_MUXROUTE_SAME(4, RK_PC3, 1, 0xe21c, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* uart2dbgc_rx */
+ RK_MUXROUTE_SAME(2, RK_PD2, 2, 0xe21c, BIT(16 + 14)), /* pcie_clkreqn */
+ RK_MUXROUTE_SAME(4, RK_PD0, 1, 0xe21c, BIT(16 + 14) | BIT(14)), /* pcie_clkreqnb */
+};
+
+static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ RK_MUXROUTE_PMU(0, RK_PB7, 1, 0x0110, WRITE_MASK_VAL(1, 0, 0)), /* PWM0 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PC7, 2, 0x0110, WRITE_MASK_VAL(1, 0, 1)), /* PWM0 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PC0, 1, 0x0110, WRITE_MASK_VAL(3, 2, 0)), /* PWM1 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB2, 1, 0x0304, WRITE_MASK_VAL(2, 2, 0)), /* I2C4 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PB1, 2, 0x0304, WRITE_MASK_VAL(2, 2, 1)), /* I2C4 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB4, 4, 0x0304, WRITE_MASK_VAL(4, 4, 0)), /* I2C5 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PD0, 2, 0x0304, WRITE_MASK_VAL(4, 4, 1)), /* I2C5 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB1, 5, 0x0304, WRITE_MASK_VAL(14, 14, 0)), /* PWM8 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 4, 0x0304, WRITE_MASK_VAL(14, 14, 1)), /* PWM8 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB2, 5, 0x0308, WRITE_MASK_VAL(0, 0, 0)), /* PWM9 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD6, 4, 0x0308, WRITE_MASK_VAL(0, 0, 1)), /* PWM9 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB5, 5, 0x0308, WRITE_MASK_VAL(2, 2, 0)), /* PWM10 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PA1, 2, 0x0308, WRITE_MASK_VAL(2, 2, 1)), /* PWM10 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB6, 5, 0x0308, WRITE_MASK_VAL(4, 4, 0)), /* PWM11 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC0, 3, 0x0308, WRITE_MASK_VAL(4, 4, 1)), /* PWM11 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PB7, 2, 0x0308, WRITE_MASK_VAL(6, 6, 0)), /* PWM12 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC5, 1, 0x0308, WRITE_MASK_VAL(6, 6, 1)), /* PWM12 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC0, 2, 0x0308, WRITE_MASK_VAL(8, 8, 0)), /* PWM13 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC6, 1, 0x0308, WRITE_MASK_VAL(8, 8, 1)), /* PWM13 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 1, 0x0308, WRITE_MASK_VAL(10, 10, 0)), /* PWM14 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 1, 0x0308, WRITE_MASK_VAL(10, 10, 1)), /* PWM14 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC5, 1, 0x0308, WRITE_MASK_VAL(12, 12, 0)), /* PWM15 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PC1, 4, 0x030c, WRITE_MASK_VAL(4, 4, 0)), /* SPI2 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PA0, 3, 0x030c, WRITE_MASK_VAL(4, 4, 1)), /* SPI2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
+ RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+ RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA6, 2, 0x030c, WRITE_MASK_VAL(14, 14, 0)), /* UART4 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PB2, 4, 0x030c, WRITE_MASK_VAL(14, 14, 1)), /* UART4 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PA2, 3, 0x0310, WRITE_MASK_VAL(0, 0, 0)), /* UART5 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC2, 4, 0x0310, WRITE_MASK_VAL(0, 0, 1)), /* UART5 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PA4, 3, 0x0310, WRITE_MASK_VAL(2, 2, 0)), /* UART6 IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PD5, 3, 0x0310, WRITE_MASK_VAL(2, 2, 1)), /* UART6 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PA6, 3, 0x0310, WRITE_MASK_VAL(5, 4, 0)), /* UART7 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 4, 0x0310, WRITE_MASK_VAL(5, 4, 1)), /* UART7 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA2, 4, 0x0310, WRITE_MASK_VAL(5, 4, 2)), /* UART7 IO mux M2 */
+ RK_MUXROUTE_GRF(2, RK_PC5, 3, 0x0310, WRITE_MASK_VAL(6, 6, 0)), /* UART8 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD7, 4, 0x0310, WRITE_MASK_VAL(6, 6, 1)), /* UART8 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PB0, 3, 0x0310, WRITE_MASK_VAL(9, 8, 0)), /* UART9 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC5, 4, 0x0310, WRITE_MASK_VAL(9, 8, 1)), /* UART9 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA4, 4, 0x0310, WRITE_MASK_VAL(9, 8, 2)), /* UART9 IO mux M2 */
+ RK_MUXROUTE_GRF(1, RK_PA2, 1, 0x0310, WRITE_MASK_VAL(11, 10, 0)), /* I2S1 IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PC6, 4, 0x0310, WRITE_MASK_VAL(11, 10, 1)), /* I2S1 IO mux M1 */
+ RK_MUXROUTE_GRF(2, RK_PD0, 5, 0x0310, WRITE_MASK_VAL(11, 10, 2)), /* I2S1 IO mux M2 */
+ RK_MUXROUTE_GRF(2, RK_PC1, 1, 0x0310, WRITE_MASK_VAL(12, 12, 0)), /* I2S2 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PB6, 5, 0x0310, WRITE_MASK_VAL(12, 12, 1)), /* I2S2 IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PA2, 4, 0x0310, WRITE_MASK_VAL(14, 14, 0)), /* I2S3 IO mux M0 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 5, 0x0310, WRITE_MASK_VAL(14, 14, 1)), /* I2S3 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(1, 0, 0)), /* PDM IO mux M0 */
+ RK_MUXROUTE_GRF(1, RK_PA6, 3, 0x0314, WRITE_MASK_VAL(1, 0, 0)), /* PDM IO mux M0 */
+ RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
+ RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
+ RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
+ RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
+ RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+ RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
+ RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
};
static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin,
@@ -2102,6 +1738,68 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit = (pin_num % 8) * 2;
}
+#define RK3568_PULL_PMU_OFFSET 0x20
+#define RK3568_PULL_GRF_OFFSET 0x80
+#define RK3568_PULL_BITS_PER_PIN 2
+#define RK3568_PULL_PINS_PER_REG 8
+#define RK3568_PULL_BANK_STRIDE 0x10
+
+static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3568_PULL_PMU_OFFSET;
+ *reg += bank->bank_num * RK3568_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3568_PULL_PINS_PER_REG) * 4);
+
+ *bit = pin_num % RK3568_PULL_PINS_PER_REG;
+ *bit *= RK3568_PULL_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3568_PULL_GRF_OFFSET;
+ *reg += (bank->bank_num - 1) * RK3568_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3568_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3568_PULL_PINS_PER_REG);
+ *bit *= RK3568_PULL_BITS_PER_PIN;
+ }
+}
+
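/*
 * Worked example (editor's aside, not part of the patch): for bank 2,
 * pin 13 the GRF branch above yields
 * reg = 0x80 + (2 - 1) * 0x10 + (13 / 8) * 4 = 0x94 and
 * bit = (13 % 8) * 2 = 10, i.e. the 2-bit pull field occupies [11:10].
 */
#include <stdio.h>

int main(void)
{
	int bank = 2, pin = 13;
	int reg = 0x80 + (bank - 1) * 0x10 + (pin / 8) * 4;
	int bit = (pin % 8) * 2;

	printf("reg=0x%x bit=%d\n", reg, bit); /* reg=0x94 bit=10 */
	return 0;
}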
+#define RK3568_DRV_PMU_OFFSET 0x70
+#define RK3568_DRV_GRF_OFFSET 0x200
+#define RK3568_DRV_BITS_PER_PIN 8
+#define RK3568_DRV_PINS_PER_REG 2
+#define RK3568_DRV_BANK_STRIDE 0x40
+
+static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* Bank 0 (the first 32 pins) is located in the PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3568_DRV_PMU_OFFSET;
+ *reg += ((pin_num / RK3568_DRV_PINS_PER_REG) * 4);
+
+ *bit = pin_num % RK3568_DRV_PINS_PER_REG;
+ *bit *= RK3568_DRV_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3568_DRV_GRF_OFFSET;
+ *reg += (bank->bank_num - 1) * RK3568_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3568_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3568_DRV_PINS_PER_REG);
+ *bit *= RK3568_DRV_BITS_PER_PIN;
+ }
+}
+
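/*
 * Editor's aside, not part of the patch: rockchip_set_drive_perpin() below
 * special-cases RK3568 by writing (1 << (strength + 1)) - 1 into this
 * 8-bit field, so drive level N becomes a run of N + 1 ones:
 */
#include <stdio.h>

int main(void)
{
	for (int strength = 0; strength <= 5; strength++)
		printf("level %d -> 0x%02x\n",
		       strength, (1 << (strength + 1)) - 1); /* 0x01..0x3f */
	return 0;
}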
static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = {
{ 2, 4, 8, 12, -1, -1, -1, -1 },
{ 3, 6, 9, 12, -1, -1, -1, -1 },
@@ -2202,6 +1900,11 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
bank->bank_num, pin_num, strength);
ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ if (ctrl->type == RK3568) {
+ rmask_bits = RK3568_DRV_BITS_PER_PIN;
+ ret = (1 << (strength + 1)) - 1;
+ goto config;
+ }
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) {
@@ -2271,6 +1974,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
return -EINVAL;
}
+config:
/* enable the write to the equivalent lower bits */
data = ((1 << rmask_bits) - 1) << (bit + 16);
rmask = data | (data >> 16);
@@ -2373,6 +2077,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3308:
case RK3368:
case RK3399:
+ case RK3568:
pull_type = bank->pull_type[pin_num / 8];
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]);
@@ -2382,6 +2087,14 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
break;
}
}
+ /*
+ * In the TRM, the pull-up value is 1 for everything except
+ * GPIO0_D0-D6, where it becomes 3.
+ */
+ if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+ if (ret == 1)
+ ret = 3;
+ }
if (ret < 0) {
dev_err(info->dev, "unsupported pull setting %d\n",
@@ -2426,6 +2139,35 @@ static int rk3328_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3568_SCHMITT_BITS_PER_PIN 2
+#define RK3568_SCHMITT_PINS_PER_REG 8
+#define RK3568_SCHMITT_BANK_STRIDE 0x10
+#define RK3568_SCHMITT_GRF_OFFSET 0xc0
+#define RK3568_SCHMITT_PMUGRF_OFFSET 0x30
+
+static int rk3568_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3568_SCHMITT_PMUGRF_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3568_SCHMITT_GRF_OFFSET;
+ *reg += (bank->bank_num - 1) * RK3568_SCHMITT_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / RK3568_SCHMITT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3568_SCHMITT_PINS_PER_REG;
+ *bit *= RK3568_SCHMITT_BITS_PER_PIN;
+
+ return 0;
+}
+
static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -2444,6 +2186,13 @@ static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num)
return ret;
data >>= bit;
+ switch (ctrl->type) {
+ case RK3568:
+ return data & ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1);
+ default:
+ break;
+ }
+
return data & 0x1;
}
@@ -2465,8 +2214,17 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
return ret;
/* enable the write to the equivalent lower bits */
- data = BIT(bit + 16) | (enable << bit);
- rmask = BIT(bit + 16) | BIT(bit);
+ switch (ctrl->type) {
+ case RK3568:
+ data = ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16);
+ rmask = data | (data >> 16);
+ data |= ((enable ? 0x2 : 0x1) << bit);
+ break;
+ default:
+ data = BIT(bit + 16) | (enable << bit);
+ rmask = BIT(bit + 16) | BIT(bit);
+ break;
+ }
return regmap_update_bits(regmap, reg, rmask, data);
}
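/*
 * Worked example (editor's aside, not part of the patch): with bit = 10,
 * the RK3568 branch builds a 2-bit hiword-masked update -- write-enable
 * bits at [27:26], value 0x2 (enable) or 0x1 (disable) at [11:10] -- so a
 * single regmap_update_bits() touches only that field:
 */
#include <stdio.h>

int main(void)
{
	unsigned int bit = 10, enable = 1;
	unsigned int data = 0x3u << (bit + 16);
	unsigned int rmask = data | (data >> 16);

	data |= (enable ? 0x2u : 0x1u) << bit;
	printf("rmask=0x%08x data=0x%08x\n", rmask, data);
	return 0; /* rmask=0x0c000c00 data=0x0c000800 */
}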
@@ -2640,6 +2398,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3308:
case RK3368:
case RK3399:
+ case RK3568:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
@@ -3433,6 +3192,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
* things enabled, so for us that's all masked and all enabled.
*/
writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_PORTS_EOI);
writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
gc->mask_cache = 0xffffffff;
@@ -4213,6 +3973,45 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3568_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
+ IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
+ IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT,
+ IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT),
+};
+
+static struct rockchip_pin_ctrl rk3568_pin_ctrl = {
+ .pin_banks = rk3568_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3568_pin_banks),
+ .label = "RK3568-GPIO",
+ .type = RK3568,
+ .grf_mux_offset = 0x0,
+ .pmu_mux_offset = 0x0,
+ .grf_drv_offset = 0x0200,
+ .pmu_drv_offset = 0x0070,
+ .iomux_routes = rk3568_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3568_mux_route_data),
+ .pull_calc_reg = rk3568_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3568_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit,
+};
+
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
{ .compatible = "rockchip,px30-pinctrl",
.data = &px30_pin_ctrl },
@@ -4242,6 +4041,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
.data = &rk3399_pin_ctrl },
+ { .compatible = "rockchip,rk3568-pinctrl",
+ .data = &rk3568_pin_ctrl },
{},
};
@@ -4259,3 +4060,14 @@ static int __init rockchip_pinctrl_drv_register(void)
return platform_driver_register(&rockchip_pinctrl_driver);
}
postcore_initcall(rockchip_pinctrl_drv_register);
+
+static void __exit rockchip_pinctrl_drv_unregister(void)
+{
+ platform_driver_unregister(&rockchip_pinctrl_driver);
+}
+module_exit(rockchip_pinctrl_drv_unregister);
+
+MODULE_DESCRIPTION("ROCKCHIP Pin Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pinctrl-rockchip");
+MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 7771316dfffa..2c9c9835f375 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
writel(val, reg);
}
+static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
+ unsigned int pin)
+{
+ unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
+
+ if (pcs->bits_per_mux) {
+ unsigned int pin_offset_bytes;
+
+ pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
+ return (pin_offset_bytes / mux_bytes) * mux_bytes;
+ }
+
+ return pin * mux_bytes;
+}
+
+static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
+ unsigned int pin)
+{
+ return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
+}
+
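
A worked example for the two helpers above, assuming a pinctrl-single instance with 16-bit registers (pcs->width = 16) and 2 bits per pin (both values illustrative): pin 21 lands at byte offset 4 with a shift of 10.

    #include <stdio.h>

    int main(void)
    {
            unsigned int width = 16, bits_per_pin = 2, pin = 21;
            unsigned int mux_bytes = width / 8;
            unsigned int offset = (bits_per_pin * pin / 8 / mux_bytes) * mux_bytes;
            unsigned int shift = (pin % (width / bits_per_pin)) * bits_per_pin;

            printf("offset %u shift %u\n", offset, shift);  /* offset 4 shift 10 */
            return 0;
    }
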
static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned pin)
{
struct pcs_device *pcs;
- unsigned val, mux_bytes;
+ unsigned int val;
unsigned long offset;
size_t pa;
pcs = pinctrl_dev_get_drvdata(pctldev);
- mux_bytes = pcs->width / BITS_PER_BYTE;
- offset = pin * mux_bytes;
+ offset = pcs_pin_reg_offset_get(pcs, pin);
val = pcs->read(pcs->base + offset);
+
+ if (pcs->bits_per_mux)
+ val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
+
pa = pcs->res->start + offset;
seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
@@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pcs_gpiofunc_range *frange = NULL;
struct list_head *pos, *tmp;
- int mux_bytes = 0;
unsigned data;
/* If function mask is null, return directly. */
@@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
return -ENOTSUPP;
list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
+ u32 offset;
+
frange = list_entry(pos, struct pcs_gpiofunc_range, node);
if (pin >= frange->offset + frange->npins
|| pin < frange->offset)
continue;
- mux_bytes = pcs->width / BITS_PER_BYTE;
- if (pcs->bits_per_mux) {
- int byte_num, offset, pin_shift;
+ offset = pcs_pin_reg_offset_get(pcs, pin);
- byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
- offset = (byte_num / mux_bytes) * mux_bytes;
- pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
- pcs->bits_per_pin;
+ if (pcs->bits_per_mux) {
+ int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
data = pcs->read(pcs->base + offset);
data &= ~(pcs->fmask << pin_shift);
data |= frange->gpiofunc << pin_shift;
pcs->write(data, pcs->base + offset);
} else {
- data = pcs->read(pcs->base + pin * mux_bytes);
+ data = pcs->read(pcs->base + offset);
data &= ~pcs->fmask;
data |= frange->gpiofunc;
- pcs->write(data, pcs->base + pin * mux_bytes);
+ pcs->write(data, pcs->base + offset);
}
break;
}
@@ -512,7 +533,7 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
default:
*config = data;
break;
@@ -550,7 +571,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
shift = ffs(func->conf[i].mask) - 1;
data &= ~func->conf[i].mask;
data |= (arg << shift) & func->conf[i].mask;
@@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
* pcs_add_pin() - add a pin to the static per controller pin array
* @pcs: pcs driver instance
* @offset: register offset from base
- * @pin_pos: unused
*/
-static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
- unsigned pin_pos)
+static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
{
struct pcs_soc_data *pcs_soc = &pcs->socdata;
struct pinctrl_pin_desc *pin;
@@ -703,14 +722,12 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
static int pcs_allocate_pin_table(struct pcs_device *pcs)
{
int mux_bytes, nr_pins, i;
- int num_pins_in_register = 0;
mux_bytes = pcs->width / BITS_PER_BYTE;
if (pcs->bits_per_mux) {
pcs->bits_per_pin = fls(pcs->fmask);
nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin;
- num_pins_in_register = pcs->width / pcs->bits_per_pin;
} else {
nr_pins = pcs->size / mux_bytes;
}
@@ -728,17 +745,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
for (i = 0; i < pcs->desc.npins; i++) {
unsigned offset;
int res;
- int byte_num;
- int pin_pos = 0;
- if (pcs->bits_per_mux) {
- byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
- offset = (byte_num / mux_bytes) * mux_bytes;
- pin_pos = i % num_pins_in_register;
- } else {
- offset = i * mux_bytes;
- }
- res = pcs_add_pin(pcs, offset, pin_pos);
+ offset = pcs_pin_reg_offset_get(pcs, i);
+ res = pcs_add_pin(pcs, offset);
if (res < 0) {
dev_err(pcs->dev, "error adding pins: %i\n", res);
return res;
@@ -910,7 +919,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
- { "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
+ { "pinctrl-single,low-power-mode", PIN_CONFIG_MODE_LOW_POWER, },
};
static const struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index c6052a0e827a..5fb924a2eedd 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -1016,7 +1016,7 @@ static int zynq_pinconf_cfg_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_SLEW_RATE:
arg = !!(reg & ZYNQ_PINCONF_SPEED);
break;
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
{
enum zynq_io_standards iostd = zynq_pinconf_iostd_get(reg);
@@ -1087,7 +1087,7 @@ static int zynq_pinconf_cfg_set(struct pinctrl_dev *pctldev,
reg &= ~ZYNQ_PINCONF_IOTYPE_MASK;
reg |= arg << ZYNQ_PINCONF_IOTYPE_SHIFT;
break;
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (arg)
reg |= ZYNQ_PINCONF_DISABLE_RECVR;
else
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
new file mode 100644
index 000000000000..d5497003ce71
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP pin controller
+ *
+ * Copyright (C) 2020 Xilinx, Inc.
+ *
+ * Sai Krishna Potthuri <lakshmi.sai.krishna.potthuri@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+#define ZYNQMP_PIN_PREFIX "MIO"
+#define PINCTRL_GET_FUNC_NAME_RESP_LEN 16
+#define MAX_FUNC_NAME_LEN 16
+#define MAX_GROUP_PIN 50
+#define MAX_PIN_GROUPS 50
+#define END_OF_FUNCTIONS "END_OF_FUNCTIONS"
+#define NUM_GROUPS_PER_RESP 6
+
+#define PINCTRL_GET_FUNC_GROUPS_RESP_LEN 12
+#define PINCTRL_GET_PIN_GROUPS_RESP_LEN 12
+#define NA_GROUP 0xFFFF
+#define RESERVED_GROUP 0xFFFE
+
+#define DRIVE_STRENGTH_2MA 2
+#define DRIVE_STRENGTH_4MA 4
+#define DRIVE_STRENGTH_8MA 8
+#define DRIVE_STRENGTH_12MA 12
+
+/**
+ * struct zynqmp_pmux_function - a pinmux function
+ * @name: Name of the pin mux function
+ * @groups: List of pin groups for this function
+ * @ngroups: Number of entries in @groups
+ *
+ * This structure holds information about a pin control function and
+ * the names of the pin groups that support it.
+ */
+struct zynqmp_pmux_function {
+ char name[MAX_FUNC_NAME_LEN];
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct zynqmp_pinctrl - driver data
+ * @pctrl: Pin control device
+ * @groups: Pin groups
+ * @ngroups: Number of @groups
+ * @funcs: Pin mux functions
+ * @nfuncs: Number of @funcs
+ *
+ * This struct is stored as driver data and used to retrieve
+ * information regarding pin control functions, groups and
+ * group pins.
+ */
+struct zynqmp_pinctrl {
+ struct pinctrl_dev *pctrl;
+ const struct zynqmp_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct zynqmp_pmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+/**
+ * struct zynqmp_pctrl_group - Pin control group info
+ * @name: Group name
+ * @pins: Group pin numbers
+ * @npins: Number of pins in the group
+ */
+struct zynqmp_pctrl_group {
+ const char *name;
+ unsigned int pins[MAX_GROUP_PIN];
+ unsigned int npins;
+};
+
+static struct pinctrl_desc zynqmp_desc;
+
+static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *npins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops zynqmp_pctrl_ops = {
+ .get_groups_count = zynqmp_pctrl_get_groups_count,
+ .get_group_name = zynqmp_pctrl_get_group_name,
+ .get_group_pins = zynqmp_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int zynqmp_pinmux_request_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ ret = zynqmp_pm_pinctrl_request(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "request failed for pin %u\n", pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+/**
+ * zynqmp_pmux_get_function_groups() - Get groups for the function
+ * @pctldev: Pin control device pointer.
+ * @selector: Function ID
+ * @groups: Group names.
+ * @num_groups: Number of function groups.
+ *
+ * Get function's group count and group names.
+ */
+static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_set_mux() - Set requested function for the group
+ * @pctldev: Pin control device pointer.
+ * @function: Function ID.
+ * @group: Group ID.
+ *
+ * Loop through all pins of the group, calling the firmware API to
+ * set the requested function on each pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[group];
+ int ret, i;
+
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+
+ ret = zynqmp_pm_pinctrl_set_function(pin, function);
+ if (ret) {
+ dev_err(pctldev->dev, "set mux failed for pin %u\n",
+ pin);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int zynqmp_pinmux_release_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ ret = zynqmp_pm_pinctrl_release(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "free pin failed for pin %u\n",
+ pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops zynqmp_pinmux_ops = {
+ .request = zynqmp_pinmux_request_pin,
+ .get_functions_count = zynqmp_pmux_get_functions_count,
+ .get_function_name = zynqmp_pmux_get_function_name,
+ .get_function_groups = zynqmp_pmux_get_function_groups,
+ .set_mux = zynqmp_pinmux_set_mux,
+ .free = zynqmp_pinmux_release_pin,
+};
+
+/**
+ * zynqmp_pinconf_cfg_get() - get config value for the pin
+ * @pctldev: Pin control device pointer.
+ * @pin: Pin number.
+ * @config: Value of config param.
+ *
+ * Get value of the requested configuration parameter for the
+ * given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ unsigned int arg, param = pinconf_to_config_param(*config);
+ int ret;
+
+ if (pin >= zynqmp_desc.npins)
+ return -EOPNOTSUPP;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ param = PM_PINCTRL_CONFIG_SLEW_RATE;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_UP)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_DOWN)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ param = PM_PINCTRL_CONFIG_BIAS_STATUS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ if (arg != PM_PINCTRL_BIAS_DISABLE)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ param = PM_PINCTRL_CONFIG_VOLTAGE_STATUS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ param = PM_PINCTRL_CONFIG_SCHMITT_CMOS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ param = PM_PINCTRL_CONFIG_DRIVE_STRENGTH;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &arg);
+ switch (arg) {
+ case PM_PINCTRL_DRIVE_STRENGTH_2MA:
+ arg = DRIVE_STRENGTH_2MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_4MA:
+ arg = DRIVE_STRENGTH_4MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_8MA:
+ arg = DRIVE_STRENGTH_8MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_12MA:
+ arg = DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ param = pinconf_to_config_param(*config);
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
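
The *config round-trip at the end of the function uses the generic pinconf packing. A sketch of that layout, assuming the kernel's PIN_CONF_PACKED() convention of parameter in the low 8 bits and argument in the bits above (the parameter value 10 below is a placeholder, not a real PIN_CONFIG_* constant):

    #include <stdio.h>

    int main(void)
    {
            unsigned long param = 10, arg = 8;      /* e.g. 8 mA drive strength */
            unsigned long config = (arg << 8) | (param & 0xff);     /* pack */

            printf("param %lu arg %lu\n", config & 0xff, config >> 8);
            return 0;                               /* unpacks to param 10 arg 8 */
    }
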
+
+/**
+ * zynqmp_pinconf_cfg_set() - Set requested config for the pin
+ * @pctldev: Pin control device pointer.
+ * @pin: Pin number.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Loop through all configurations and call firmware API
+ * to set requested configurations for the pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+
+ if (pin >= zynqmp_desc.npins)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
+ unsigned int value;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ param = PM_PINCTRL_CONFIG_SLEW_RATE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ arg = PM_PINCTRL_BIAS_PULL_UP;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ param = PM_PINCTRL_CONFIG_PULL_CTRL;
+ arg = PM_PINCTRL_BIAS_PULL_DOWN;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ param = PM_PINCTRL_CONFIG_BIAS_STATUS;
+ arg = PM_PINCTRL_BIAS_DISABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ param = PM_PINCTRL_CONFIG_SCHMITT_CMOS;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (arg) {
+ case DRIVE_STRENGTH_2MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_2MA;
+ break;
+ case DRIVE_STRENGTH_4MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_4MA;
+ break;
+ case DRIVE_STRENGTH_8MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_8MA;
+ break;
+ case DRIVE_STRENGTH_12MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+
+ param = PM_PINCTRL_CONFIG_DRIVE_STRENGTH;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, value);
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ param = PM_PINCTRL_CONFIG_VOLTAGE_STATUS;
+ ret = zynqmp_pm_pinctrl_get_config(pin, param, &value);
+
+ if (arg != value)
+ dev_warn(pctldev->dev,
+ "Invalid IO Standard requested for pin %d\n",
+ pin);
+
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_MODE_LOW_POWER:
+			/*
+			 * These cases appear in device trees, but the
+			 * registers to configure them are unknown, so
+			 * accept them silently for now to avoid
+			 * boot-time warnings.
+			 */
+ ret = 0;
+ break;
+ default:
+ dev_warn(pctldev->dev,
+ "unsupported configuration parameter '%u'\n",
+ param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+ if (ret)
+ dev_warn(pctldev->dev,
+ "failed to set: pin %u param %u value %u\n",
+ pin, param, arg);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_group_set() - Set requested config for the group
+ * @pctldev: Pin control device pointer.
+ * @selector: Group ID.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Call function to set configs for each pin in the group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[selector];
+
+ for (i = 0; i < pgrp->npins; i++) {
+ ret = zynqmp_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops zynqmp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zynqmp_pinconf_cfg_get,
+ .pin_config_set = zynqmp_pinconf_cfg_set,
+ .pin_config_group_set = zynqmp_pinconf_group_set,
+};
+
+static struct pinctrl_desc zynqmp_desc = {
+ .name = "zynqmp_pinctrl",
+ .owner = THIS_MODULE,
+ .pctlops = &zynqmp_pctrl_ops,
+ .pmxops = &zynqmp_pinmux_ops,
+ .confops = &zynqmp_pinconf_ops,
+};
+
+static int zynqmp_pinctrl_get_function_groups(u32 fid, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+ qdata.arg2 = index;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &payload[1], PINCTRL_GET_FUNC_GROUPS_RESP_LEN);
+
+ return ret;
+}
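
For reference, a sketch of how the 12-byte response above packs six u16 group IDs into u32 payload words 1..3 (payload word 0 is assumed to carry the firmware status; the IDs and the little-endian host are assumptions of this sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* made-up response words; on a little-endian host each u32
             * yields two consecutive u16 group IDs */
            uint32_t payload[4] = { 0, 0x00020001, 0xfffe0003, 0xffff0004 };
            uint16_t groups[6];

            memcpy(groups, &payload[1], sizeof(groups));
            for (int i = 0; i < 6; i++)
                    printf("group[%d] = 0x%04x\n", i, groups[i]);
            return 0;       /* 0xfffe = RESERVED_GROUP, 0xffff = NA_GROUP */
    }
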
+
+static int zynqmp_pinctrl_get_func_num_groups(u32 fid, unsigned int *ngroups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ *ngroups = payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_func_groups() - prepare function and groups data
+ * @dev: Device pointer.
+ * @fid: Function ID.
+ * @func: Function data.
+ * @groups: Groups data.
+ *
+ * Query the firmware for the group IDs of each function. Group names
+ * are derived from the group's index within its function: the first
+ * group of the "eth0" function is named "eth0_0_grp", the second
+ * "eth0_1_grp", and so on.
+ *
+ * The same name is also stored under the group ID returned by the
+ * firmware: if the first group ID of "eth0" is x, groups[x].name is
+ * set to "eth0_0_grp".
+ *
+ * Once this is done for every function, each function knows its group
+ * names and each group knows its own name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
+ struct zynqmp_pmux_function *func,
+ struct zynqmp_pctrl_group *groups)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ const char **fgroups;
+ int ret = 0, index, i;
+
+ fgroups = devm_kzalloc(dev, sizeof(*fgroups) * func->ngroups, GFP_KERNEL);
+ if (!fgroups)
+ return -ENOMEM;
+
+ for (index = 0; index < func->ngroups; index += NUM_GROUPS_PER_RESP) {
+ ret = zynqmp_pinctrl_get_function_groups(fid, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == NA_GROUP)
+ goto done;
+
+ if (resp[i] == RESERVED_GROUP)
+ continue;
+
+ fgroups[index + i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ if (!fgroups[index + i])
+ return -ENOMEM;
+
+ groups[resp[i]].name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ if (!groups[resp[i]].name)
+ return -ENOMEM;
+ }
+ }
+done:
+ func->groups = fgroups;
+
+ return ret;
+}
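
A minimal sketch of the naming scheme described above ("eth0" is the example function name used in the comment):

    #include <stdio.h>

    int main(void)
    {
            char name[32];

            for (int i = 0; i < 3; i++) {
                    snprintf(name, sizeof(name), "%s_%d_grp", "eth0", i);
                    printf("%s\n", name);   /* eth0_0_grp, eth0_1_grp, ... */
            }
            return 0;
    }
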
+
+static void zynqmp_pinctrl_get_function_name(u32 fid, char *name)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_NAME;
+ qdata.arg1 = fid;
+
+ /*
+	 * A function name may be up to 16 bytes and fills the SMC
+	 * response buffers completely, leaving no room for a return
+	 * value, so the return value is ignored for this specific qid.
+ */
+ zynqmp_pm_query_data(qdata, payload);
+ memcpy(name, payload, PINCTRL_GET_FUNC_NAME_RESP_LEN);
+}
+
+static int zynqmp_pinctrl_get_num_functions(unsigned int *nfuncs)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTIONS;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ *nfuncs = payload[1];
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_get_pin_groups(u32 pin, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_PIN_GROUPS;
+ qdata.arg1 = pin;
+ qdata.arg2 = index;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &payload[1], PINCTRL_GET_PIN_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+static void zynqmp_pinctrl_group_add_pin(struct zynqmp_pctrl_group *group,
+ unsigned int pin)
+{
+ group->pins[group->npins++] = pin;
+}
+
+/**
+ * zynqmp_pinctrl_create_pin_groups() - assign pins to respective groups
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @pin: Pin number.
+ *
+ * Query firmware to get groups available for the given pin.
+ * Based on the firmware response (the group IDs for the pin), add the
+ * pin number to each matching group's pin array.
+ *
+ * Once all pins have been queried, each group holds its pin count and
+ * pin numbers.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_create_pin_groups(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int pin)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ int ret, i, index = 0;
+
+ do {
+ ret = zynqmp_pinctrl_get_pin_groups(pin, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == NA_GROUP)
+ return ret;
+
+ if (resp[i] == RESERVED_GROUP)
+ continue;
+
+ zynqmp_pinctrl_group_add_pin(&groups[resp[i]], pin);
+ }
+ index += NUM_GROUPS_PER_RESP;
+ } while (index <= MAX_PIN_GROUPS);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_group_pins() - prepare each group's pin data
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @ngroups: Number of groups.
+ *
+ * Prepare the pin numbers and pin count for each group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_group_pins(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int ngroups)
+{
+ unsigned int pin;
+ int ret;
+
+ for (pin = 0; pin < zynqmp_desc.npins; pin++) {
+ ret = zynqmp_pinctrl_create_pin_groups(dev, groups, pin);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_function_info() - prepare function info
+ * @dev: Device pointer.
+ * @pctrl: Pin control driver data.
+ *
+ * Query firmware for functions, groups and pin information and
+ * prepare pin control driver data.
+ *
+ * Query the number of functions and the number of groups per function
+ * to size the memory buffers for functions and groups. Once those
+ * buffers are allocated, query and store the remaining information:
+ * the group names of each function, and the pin count and pin numbers
+ * of each group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
+ struct zynqmp_pinctrl *pctrl)
+{
+ struct zynqmp_pmux_function *funcs;
+ struct zynqmp_pctrl_group *groups;
+ int ret, i;
+
+ ret = zynqmp_pinctrl_get_num_functions(&pctrl->nfuncs);
+ if (ret)
+ return ret;
+
+ funcs = devm_kzalloc(dev, sizeof(*funcs) * pctrl->nfuncs, GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ zynqmp_pinctrl_get_function_name(i, funcs[i].name);
+
+ ret = zynqmp_pinctrl_get_func_num_groups(i, &funcs[i].ngroups);
+ if (ret)
+ return ret;
+
+ pctrl->ngroups += funcs[i].ngroups;
+ }
+
+ groups = devm_kzalloc(dev, sizeof(*groups) * pctrl->ngroups, GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ ret = zynqmp_pinctrl_prepare_func_groups(dev, i, &funcs[i],
+ groups);
+ if (ret)
+ return ret;
+ }
+
+ ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
+ if (ret)
+ return ret;
+
+ pctrl->funcs = funcs;
+ pctrl->groups = groups;
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_get_num_pins(unsigned int *npins)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_PINS;
+
+ ret = zynqmp_pm_query_data(qdata, payload);
+ if (ret)
+ return ret;
+
+ *npins = payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_pin_desc() - prepare pin description info
+ * @dev: Device pointer.
+ * @zynqmp_pins: Pin information.
+ * @npins: Number of pins.
+ *
+ * Query the number of pins from the firmware and prepare a pin
+ * description for each, containing the pin number and pin name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
+ const struct pinctrl_pin_desc
+ **zynqmp_pins,
+ unsigned int *npins)
+{
+ struct pinctrl_pin_desc *pins, *pin;
+ int ret;
+ int i;
+
+ ret = zynqmp_pinctrl_get_num_pins(npins);
+ if (ret)
+ return ret;
+
+ pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < *npins; i++) {
+ pin = &pins[i];
+ pin->number = i;
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ if (!pin->name)
+ return -ENOMEM;
+ }
+
+ *zynqmp_pins = pins;
+
+ return 0;
+}
+
+static int zynqmp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev,
+ &zynqmp_desc.pins,
+ &zynqmp_desc.npins);
+ if (ret) {
+ dev_err(&pdev->dev, "pin desc prepare fail with %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = zynqmp_pinctrl_prepare_function_info(&pdev->dev, pctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "function info prepare fail with %d\n",
+ ret);
+ return ret;
+ }
+
+ pctrl->pctrl = pinctrl_register(&zynqmp_desc, &pdev->dev, pctrl);
+ if (IS_ERR(pctrl->pctrl))
+ return PTR_ERR(pctrl->pctrl);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ return ret;
+}
+
+static int zynqmp_pinctrl_remove(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_pinctrl_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pinctrl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_pinctrl_of_match);
+
+static struct platform_driver zynqmp_pinctrl_driver = {
+ .driver = {
+ .name = "zynqmp-pinctrl",
+ .of_match_table = zynqmp_pinctrl_of_match,
+ },
+ .probe = zynqmp_pinctrl_probe,
+ .remove = zynqmp_pinctrl_remove,
+};
+
+module_platform_driver(zynqmp_pinctrl_driver);
+
+MODULE_AUTHOR("Sai Krishna Potthuri <lakshmi.sai.krishna.potthuri@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP Pin Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 36a11c9e893a..6cdbd9ccf2f0 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "pinmux core: " fmt
+#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -673,13 +674,114 @@ void pinmux_show_setting(struct seq_file *s,
DEFINE_SHOW_ATTRIBUTE(pinmux_functions);
DEFINE_SHOW_ATTRIBUTE(pinmux_pins);
+#define PINMUX_SELECT_MAX 128
+static ssize_t pinmux_select(struct file *file, const char __user *user_buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *sfile = file->private_data;
+ struct pinctrl_dev *pctldev = sfile->private;
+ const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
+ const char *const *groups;
+ char *buf, *gname, *fname;
+ unsigned int num_groups;
+ int fsel, gsel, ret;
+
+ if (len > PINMUX_SELECT_MAX)
+ return -ENOMEM;
+
+ buf = kzalloc(PINMUX_SELECT_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = strncpy_from_user(buf, user_buf, PINMUX_SELECT_MAX);
+ if (ret < 0)
+ goto exit_free_buf;
+ buf[len-1] = '\0';
+
+ /* remove leading and trailing spaces of input buffer */
+ gname = strstrip(buf);
+ if (*gname == '\0') {
+ ret = -EINVAL;
+ goto exit_free_buf;
+ }
+
+	/* find the separator, which is a whitespace character */
+ for (fname = gname; !isspace(*fname); fname++) {
+ if (*fname == '\0') {
+ ret = -EINVAL;
+ goto exit_free_buf;
+ }
+ }
+ *fname = '\0';
+
+ /* drop extra spaces between function and group names */
+ fname = skip_spaces(fname + 1);
+ if (*fname == '\0') {
+ ret = -EINVAL;
+ goto exit_free_buf;
+ }
+
+ ret = pinmux_func_name_to_selector(pctldev, fname);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "invalid function %s in map table\n", fname);
+ goto exit_free_buf;
+ }
+ fsel = ret;
+
+ ret = pmxops->get_function_groups(pctldev, fsel, &groups, &num_groups);
+ if (ret) {
+ dev_err(pctldev->dev, "no groups for function %d (%s)", fsel, fname);
+ goto exit_free_buf;
+ }
+
+ ret = match_string(groups, num_groups, gname);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "invalid group %s", gname);
+ goto exit_free_buf;
+ }
+
+ ret = pinctrl_get_group_selector(pctldev, gname);
+ if (ret < 0) {
+ dev_err(pctldev->dev, "failed to get group selector for %s", gname);
+ goto exit_free_buf;
+ }
+ gsel = ret;
+
+ ret = pmxops->set_mux(pctldev, fsel, gsel);
+ if (ret) {
+ dev_err(pctldev->dev, "set_mux() failed: %d", ret);
+ goto exit_free_buf;
+ }
+ ret = len;
+
+exit_free_buf:
+ kfree(buf);
+
+ return ret;
+}
+
+static int pinmux_select_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, inode->i_private);
+}
+
+static const struct file_operations pinmux_select_ops = {
+ .owner = THIS_MODULE,
+ .open = pinmux_select_open,
+ .write = pinmux_select,
+ .llseek = no_llseek,
+ .release = single_release,
+};
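
As the code above shows, the new debugfs file takes a group name followed by a function name, so activating a mux from the shell amounts to writing something like "uart0_grp uart0" to /sys/kernel/debug/pinctrl/<device>/pinmux-select (the names here are hypothetical and depend on the pin controller). A standalone sketch of the tokenizing, mirroring the strstrip()/skip_spaces() steps:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "  uart0_grp   uart0  ";   /* hypothetical names */
            char *gname = buf, *fname, *end;

            while (isspace((unsigned char)*gname))  /* strstrip(), leading */
                    gname++;
            end = gname + strlen(gname);            /* strstrip(), trailing */
            while (end > gname && isspace((unsigned char)end[-1]))
                    *--end = '\0';

            for (fname = gname; !isspace((unsigned char)*fname); fname++)
                    if (*fname == '\0')
                            return 1;               /* no separator: reject */
            *fname = '\0';                          /* terminate group name */

            fname++;
            while (isspace((unsigned char)*fname))  /* skip_spaces() */
                    fname++;

            printf("group '%s' function '%s'\n", gname, fname);
            return 0;
    }
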
+
void pinmux_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
- debugfs_create_file("pinmux-functions", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinmux-functions", 0444,
devroot, pctldev, &pinmux_functions_fops);
- debugfs_create_file("pinmux-pins", S_IFREG | S_IRUGO,
+ debugfs_create_file("pinmux-pins", 0444,
devroot, pctldev, &pinmux_pins_fops);
+ debugfs_create_file("pinmux-select", 0200,
+ devroot, pctldev, &pinmux_select_ops);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index eab029a21643..d2568dab8c78 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -194,7 +194,7 @@ static int pxa2xx_pconf_group_get(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pctl->lock, flags);
val = readl_relaxed(pgsr) & BIT(pin % 32);
- *config = val ? PIN_CONFIG_LOW_POWER_MODE : 0;
+ *config = val ? PIN_CONFIG_MODE_LOW_POWER : 0;
spin_unlock_irqrestore(&pctl->lock, flags);
dev_dbg(pctl->dev, "get sleep gpio state(pin=%d) %d\n",
@@ -217,7 +217,7 @@ static int pxa2xx_pconf_group_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
switch (pinconf_to_config_param(configs[i])) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
is_set = pinconf_to_config_argument(configs[i]);
break;
default:
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 6853a896c476..25d2f7f7f3b6 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -3,7 +3,7 @@ if (ARCH_QCOM || COMPILE_TEST)
config PINCTRL_MSM
tristate "Qualcomm core pin controller driver"
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+ depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -222,7 +222,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
- depends on GPIOLIB && OF
+ depends on GPIOLIB && (OF || ACPI)
select PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 9d41abfca37e..afddf6d60dbe 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1449,6 +1449,28 @@ static const struct msm_pingroup sc7280_groups[] = {
[182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sc7280_pdc_map[] = {
+ { 0, 134 }, { 3, 131 }, { 4, 121 }, { 7, 103 }, { 8, 155 },
+ { 11, 93 }, { 12, 78 }, { 15, 79 }, { 16, 80 }, { 18, 81 },
+ { 19, 107 }, { 20, 82 }, { 21, 83 }, { 23, 99 }, { 24, 86 },
+ { 25, 95 }, { 27, 158 }, { 28, 159 }, { 31, 90 }, { 32, 144 },
+ { 34, 77 }, { 35, 92 }, { 36, 157 }, { 39, 73 }, { 40, 97 },
+ { 41, 98 }, { 43, 85 }, { 44, 100 }, { 45, 101 }, { 47, 102 },
+ { 48, 74 }, { 51, 112 }, { 52, 156 }, { 54, 117 }, { 55, 84 },
+ { 56, 108 }, { 59, 110 }, { 60, 111 }, { 61, 123 }, { 63, 104 },
+ { 68, 127 }, { 72, 150 }, { 75, 133 }, { 77, 125 }, { 78, 105 },
+ { 79, 106 }, { 80, 118 }, { 81, 119 }, { 82, 162 }, { 83, 122 },
+ { 86, 75 }, { 88, 154 }, { 89, 124 }, { 90, 149 }, { 91, 76 },
+ { 93, 128 }, { 95, 160 }, { 101, 126 }, { 102, 96 }, { 103, 116 },
+ { 104, 114 }, { 112, 72 }, { 116, 135 }, { 117, 163 }, { 119, 137 },
+ { 121, 138 }, { 123, 139 }, { 125, 140 }, { 127, 141 }, { 128, 165 },
+ { 129, 143 }, { 130, 94 }, { 131, 145 }, { 133, 146 }, { 136, 147 },
+ { 140, 148 }, { 141, 115 }, { 142, 113 }, { 145, 130 }, { 148, 132 },
+ { 150, 87 }, { 151, 88 }, { 153, 89 }, { 155, 164 }, { 156, 129 },
+ { 157, 161 }, { 158, 120 }, { 161, 136 }, { 163, 142 }, { 172, 166 },
+ { 174, 167 },
+};
+
static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
.pins = sc7280_pins,
.npins = ARRAY_SIZE(sc7280_pins),
@@ -1457,6 +1479,8 @@ static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
.groups = sc7280_groups,
.ngroups = ARRAY_SIZE(sc7280_groups),
.ngpios = 176,
+ .wakeirq_map = sc7280_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sc7280_pdc_map),
};
static int sc7280_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index b765bf667574..0d9654b4ab60 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -23,6 +23,21 @@ enum {
WEST
};
+/*
+ * The ACPI DSDT has one single memory resource for the TLMM. The offsets
+ * below locate the individual tiles within it during ACPI probe.
+ */
+struct tile_info {
+ u32 offset;
+ u32 size;
+};
+
+static const struct tile_info sc8180x_tile_info[] = {
+ { 0x00d00000, 0x00300000, },
+ { 0x00500000, 0x00700000, },
+ { 0x00100000, 0x00300000, },
+};
+
#define FUNCTION(fname) \
[msm_mux_##fname] = { \
.name = #fname, \
@@ -1557,6 +1572,13 @@ static const struct msm_pingroup sc8180x_groups[] = {
[193] = SDC_QDSD_PINGROUP(sdc2_data, 0x4b2000, 9, 0),
};
+static const int sc8180x_acpi_reserved_gpios[] = {
+ 0, 1, 2, 3,
+ 47, 48, 49, 50,
+ 126, 127, 128, 129,
+ -1 /* terminator */
+};
+
static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
@@ -1588,13 +1610,109 @@ static struct msm_pinctrl_soc_data sc8180x_pinctrl = {
.nwakeirq_map = ARRAY_SIZE(sc8180x_pdc_map),
};
+static const struct msm_pinctrl_soc_data sc8180x_acpi_pinctrl = {
+ .tiles = sc8180x_tiles,
+ .ntiles = ARRAY_SIZE(sc8180x_tiles),
+ .pins = sc8180x_pins,
+ .npins = ARRAY_SIZE(sc8180x_pins),
+ .groups = sc8180x_groups,
+ .ngroups = ARRAY_SIZE(sc8180x_groups),
+ .reserved_gpios = sc8180x_acpi_reserved_gpios,
+ .ngpios = 190,
+};
+
+/*
+ * The ACPI DSDT has one single memory resource for the TLMM, which violates
+ * the hardware layout of 3 separate tiles. Split the memory resource into
+ * 3 named ones, so that msm_pinctrl_probe() can map memory for ACPI in the
+ * same way as for DT probe.
+ */
+static int sc8180x_pinctrl_add_tile_resources(struct platform_device *pdev)
+{
+ int nres_num = pdev->num_resources + ARRAY_SIZE(sc8180x_tiles) - 1;
+	struct resource *mres = NULL, *nres, *res;
+ int i, ret;
+
+ /*
+ * DT already has tiles defined properly, so nothing needs to be done
+ * for DT probe.
+ */
+ if (pdev->dev.of_node)
+ return 0;
+
+ /* Allocate for new resources */
+ nres = devm_kzalloc(&pdev->dev, sizeof(*nres) * nres_num, GFP_KERNEL);
+ if (!nres)
+ return -ENOMEM;
+
+ res = nres;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ /* Save memory resource and copy others */
+ if (resource_type(r) == IORESOURCE_MEM)
+ mres = r;
+ else
+ *res++ = *r;
+	}
+
+	if (!mres)
+		return -EINVAL;
+
+ /* Append tile memory resources */
+ for (i = 0; i < ARRAY_SIZE(sc8180x_tiles); i++, res++) {
+ const struct tile_info *info = &sc8180x_tile_info[i];
+
+ res->start = mres->start + info->offset;
+ res->end = mres->start + info->offset + info->size - 1;
+ res->flags = mres->flags;
+ res->name = sc8180x_tiles[i];
+
+ /* Add new MEM to resource tree */
+ insert_resource(mres->parent, res);
+ }
+
+ /* Remove old MEM from resource tree */
+ remove_resource(mres);
+
+ /* Free old resources and install new ones */
+ ret = platform_device_add_resources(pdev, nres, nres_num);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add new resources: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
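
A worked example of the split (the base address is illustrative, not taken from any DSDT): with a single MEM resource starting at 0x03100000, the three named tiles are carved out at base + offset.

    #include <stdio.h>

    struct tile_info { unsigned long offset, size; };

    int main(void)
    {
            const struct tile_info tiles[] = {
                    { 0x00d00000, 0x00300000 },
                    { 0x00500000, 0x00700000 },
                    { 0x00100000, 0x00300000 },
            };
            unsigned long base = 0x03100000;        /* illustrative */

            for (int i = 0; i < 3; i++)
                    printf("tile %d: [%#lx-%#lx]\n", i, base + tiles[i].offset,
                           base + tiles[i].offset + tiles[i].size - 1);
            return 0;
    }
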
+
static int sc8180x_pinctrl_probe(struct platform_device *pdev)
{
- return msm_pinctrl_probe(pdev, &sc8180x_pinctrl);
+ const struct msm_pinctrl_soc_data *soc_data;
+ int ret;
+
+ soc_data = device_get_match_data(&pdev->dev);
+ if (!soc_data)
+ return -EINVAL;
+
+ ret = sc8180x_pinctrl_add_tile_resources(pdev);
+ if (ret)
+ return ret;
+
+ return msm_pinctrl_probe(pdev, soc_data);
}
+static const struct acpi_device_id sc8180x_pinctrl_acpi_match[] = {
+ {
+ .id = "QCOM040D",
+ .driver_data = (kernel_ulong_t) &sc8180x_acpi_pinctrl,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sc8180x_pinctrl_acpi_match);
+
static const struct of_device_id sc8180x_pinctrl_of_match[] = {
- { .compatible = "qcom,sc8180x-tlmm", },
+ {
+ .compatible = "qcom,sc8180x-tlmm",
+ .data = &sc8180x_pinctrl,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sc8180x_pinctrl_of_match);
@@ -1603,6 +1721,7 @@ static struct platform_driver sc8180x_pinctrl_driver = {
.driver = {
.name = "sc8180x-pinctrl",
.of_match_table = sc8180x_pinctrl_of_match,
+ .acpi_match_table = sc8180x_pinctrl_acpi_match,
},
.probe = sc8180x_pinctrl_probe,
.remove = msm_pinctrl_remove,
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index a406ed0ec7d3..4d8f8636c2b3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1603,6 +1603,25 @@ static const struct msm_pingroup sm8350_groups[] = {
[206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = {
+ { 2, 117 }, { 7, 82 }, { 11, 83 }, { 14, 80 }, { 15, 146 },
+ { 19, 121 }, { 23, 84 }, { 26, 86 }, { 27, 75 }, { 31, 85 },
+ { 32, 97 }, { 34, 98 }, { 35, 131 }, { 36, 79 }, { 38, 99 },
+ { 39, 92 }, { 40, 101 }, { 43, 137 }, { 44, 102 }, { 46, 96 },
+ { 47, 93 }, { 50, 108 }, { 51, 127 }, { 55, 128 }, { 56, 81 },
+ { 59, 112 }, { 60, 119 }, { 63, 73 }, { 67, 74 }, { 71, 134 },
+ { 75, 103 }, { 79, 104 }, { 80, 126 }, { 81, 139 }, { 82, 140 },
+ { 83, 141 }, { 84, 124 }, { 85, 109 }, { 86, 143 }, { 87, 138 },
+ { 88, 122 }, { 89, 113 }, { 90, 114 }, { 91, 115 }, { 92, 76 },
+ { 95, 147 }, { 96, 148 }, { 98, 149 }, { 99, 150 }, { 115, 125 },
+ { 116, 106 }, { 117, 105 }, { 118, 116 }, { 119, 123 }, { 130, 145 },
+ { 136, 72 }, { 140, 100 }, { 151, 110 }, { 153, 95 }, { 155, 107 },
+ { 156, 94 }, { 157, 111 }, { 159, 118 }, { 162, 77 }, { 165, 78 },
+ { 169, 70 }, { 172, 132 }, { 174, 87 }, { 175, 88 }, { 177, 89 },
+ { 179, 120 }, { 180, 129 }, { 183, 90 }, { 185, 136 }, { 187, 142 },
+ { 190, 144 }, { 198, 91 }, { 200, 133 }, { 202, 135 },
+};
+
static const struct msm_pinctrl_soc_data sm8350_tlmm = {
.pins = sm8350_pins,
.npins = ARRAY_SIZE(sm8350_pins),
@@ -1611,6 +1630,8 @@ static const struct msm_pinctrl_soc_data sm8350_tlmm = {
.groups = sm8350_groups,
.ngroups = ARRAY_SIZE(sm8350_groups),
.ngpios = 204,
+ .wakeirq_map = sm8350_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8350_pdc_map),
};
static int sm8350_tlmm_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 9801c717e311..00870da0c94e 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1127,8 +1127,15 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
+ { .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
/* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */
{ .compatible = "qcom,pmx55-gpio", .data = (void *) 11 },
{ },
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 2bfd3006f6fd..5ccc49b387f1 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -394,26 +394,6 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
-const struct pinmux_bias_reg *
-sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
- unsigned int *bit)
-{
- unsigned int i, j;
-
- for (i = 0; pfc->info->bias_regs[i].puen; i++) {
- for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
- if (pfc->info->bias_regs[i].pins[j] == pin) {
- *bit = j;
- return &pfc->info->bias_regs[i];
- }
- }
- }
-
- WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
-
- return NULL;
-}
-
static int sh_pfc_init_ranges(struct sh_pfc *pfc)
{
struct sh_pfc_pin_range *range;
diff --git a/drivers/pinctrl/renesas/core.h b/drivers/pinctrl/renesas/core.h
index 5ca7e0830ae9..51f391e9713a 100644
--- a/drivers/pinctrl/renesas/core.h
+++ b/drivers/pinctrl/renesas/core.h
@@ -29,12 +29,4 @@ void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-const struct pinmux_bias_reg *
-sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
- unsigned int *bit);
-
-unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
-void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias);
-
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/renesas/pfc-r8a73a4.c b/drivers/pinctrl/renesas/pfc-r8a73a4.c
index b21f5afe610f..b26ff9d6ead4 100644
--- a/drivers/pinctrl/renesas/pfc-r8a73a4.c
+++ b/drivers/pinctrl/renesas/pfc-r8a73a4.c
@@ -2649,59 +2649,21 @@ static const struct pinmux_irq pinmux_irqs[] = {
PINMUX_IRQ(329), /* IRQ57 */
};
-#define PORTCR_PULMD_OFF (0 << 6)
-#define PORTCR_PULMD_DOWN (2 << 6)
-#define PORTCR_PULMD_UP (3 << 6)
-#define PORTCR_PULMD_MASK (3 << 6)
-
static const unsigned int r8a73a4_portcr_offsets[] = {
0x00000000, 0x00001000, 0x00000000, 0x00001000,
0x00001000, 0x00002000, 0x00002000, 0x00002000,
0x00002000, 0x00003000, 0x00003000,
};
-static unsigned int r8a73a4_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
-{
- void __iomem *addr;
-
- addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
-
- switch (ioread8(addr) & PORTCR_PULMD_MASK) {
- case PORTCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void __iomem *r8a73a4_pin_to_portcr(struct sh_pfc *pfc, unsigned int pin)
{
- void __iomem *addr;
- u32 value;
-
- addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
- value = ioread8(addr) & ~PORTCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
+ return pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin;
}
static const struct sh_pfc_soc_operations r8a73a4_pfc_ops = {
- .get_bias = r8a73a4_pinmux_get_bias,
- .set_bias = r8a73a4_pinmux_set_bias,
+ .get_bias = rmobile_pinmux_get_bias,
+ .set_bias = rmobile_pinmux_set_bias,
+ .pin_to_portcr = r8a73a4_pin_to_portcr,
};
const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a7740.c b/drivers/pinctrl/renesas/pfc-r8a7740.c
index fdf1b0f09f57..4eac3899d69b 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7740.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7740.c
@@ -3672,11 +3672,6 @@ static const struct pinmux_irq pinmux_irqs[] = {
PINMUX_IRQ(41, 167), /* IRQ31A */
};
-#define PORTnCR_PULMD_OFF (0 << 6)
-#define PORTnCR_PULMD_DOWN (2 << 6)
-#define PORTnCR_PULMD_UP (3 << 6)
-#define PORTnCR_PULMD_MASK (3 << 6)
-
struct r8a7740_portcr_group {
unsigned int end_pin;
unsigned int offset;
@@ -3686,7 +3681,7 @@ static const struct r8a7740_portcr_group r8a7740_portcr_offsets[] = {
{ 83, 0x0000 }, { 114, 0x1000 }, { 209, 0x2000 }, { 211, 0x3000 },
};
-static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
+static void __iomem *r8a7740_pin_to_portcr(struct sh_pfc *pfc, unsigned int pin)
{
unsigned int i;
@@ -3701,43 +3696,10 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin)
return NULL;
}
-static unsigned int r8a7740_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
-{
- void __iomem *addr = r8a7740_pinmux_portcr(pfc, pin);
- u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
-
- switch (value) {
- case PORTnCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTnCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTnCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void r8a7740_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- void __iomem *addr = r8a7740_pinmux_portcr(pfc, pin);
- u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTnCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTnCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
-}
-
static const struct sh_pfc_soc_operations r8a7740_pfc_ops = {
- .get_bias = r8a7740_pinmux_get_bias,
- .set_bias = r8a7740_pinmux_set_bias,
+ .get_bias = rmobile_pinmux_get_bias,
+ .set_bias = rmobile_pinmux_set_bias,
+ .pin_to_portcr = r8a7740_pin_to_portcr,
};
const struct sh_pfc_soc_info r8a7740_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c
index 75f52b1798c3..6185af9c4990 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7778.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7778.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include "core.h"
#include "sh_pfc.h"
#define PORT_GP_PUP_1(bank, pin, fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a7791.c b/drivers/pinctrl/renesas/pfc-r8a7791.c
index 6fce9fe2e98f..fe4ccab6b0b8 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7791.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7791.c
@@ -16,22 +16,50 @@
* which case they support both 3.3V and 1.8V signalling.
*/
#define CPU_ALL_GP(fn, sfx) \
- PORT_GP_32(0, fn, sfx), \
- PORT_GP_26(1, fn, sfx), \
- PORT_GP_32(2, fn, sfx), \
- PORT_GP_32(3, fn, sfx), \
- PORT_GP_32(4, fn, sfx), \
- PORT_GP_32(5, fn, sfx), \
- PORT_GP_CFG_24(6, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_1(6, 24, fn, sfx), \
- PORT_GP_1(6, 25, fn, sfx), \
- PORT_GP_1(6, 26, fn, sfx), \
- PORT_GP_1(6, 27, fn, sfx), \
- PORT_GP_1(6, 28, fn, sfx), \
- PORT_GP_1(6, 29, fn, sfx), \
- PORT_GP_1(6, 30, fn, sfx), \
- PORT_GP_1(6, 31, fn, sfx), \
- PORT_GP_26(7, fn, sfx)
+ PORT_GP_CFG_32(0, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_26(1, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(2, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(3, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(4, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_32(5, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_24(6, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 24, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 25, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 26, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 27, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 28, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 29, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 30, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 31, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_7(7, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_1(7, 7, fn, sfx), \
+ PORT_GP_1(7, 8, fn, sfx), \
+ PORT_GP_1(7, 9, fn, sfx), \
+ PORT_GP_CFG_1(7, 10, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 11, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 12, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 13, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 14, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 15, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 16, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 17, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 18, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 19, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 20, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 21, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 22, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 23, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 24, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(7, 25, fn, sfx, SH_PFC_PIN_CFG_PULL_UP)
+
+#define CPU_ALL_NOGP(fn) \
+ PIN_NOGP_CFG(ASEBRK_N_ACK, "ASEBRK#/ACK", fn, SH_PFC_PIN_CFG_PULL_DOWN), \
+ PIN_NOGP_CFG(AVS1, "AVS1", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(AVS2, "AVS2", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TMS, "TMS", fn, SH_PFC_PIN_CFG_PULL_UP), \
+ PIN_NOGP_CFG(TRST_N, "TRST#", fn, SH_PFC_PIN_CFG_PULL_UP)
enum {
PINMUX_RESERVED = 0,
@@ -1696,8 +1724,17 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
};
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+ NOGP_ALL(),
+};
+
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
+ PINMUX_NOGP_ALL(),
};
#if defined(CONFIG_PINCTRL_PFC_R8A7791) || defined(CONFIG_PINCTRL_PFC_R8A7793)
@@ -6645,8 +6682,322 @@ static int r8a7791_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return 31 - (pin & 0x1f);
}
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUPR0", 0xe6060100, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(1, 4), /* A20 */
+ [ 1] = RCAR_GP_PIN(1, 5), /* A21 */
+ [ 2] = RCAR_GP_PIN(1, 6), /* A22 */
+ [ 3] = RCAR_GP_PIN(1, 7), /* A23 */
+ [ 4] = RCAR_GP_PIN(1, 8), /* A24 */
+ [ 5] = RCAR_GP_PIN(6, 31), /* DU0_DOTCLKIN */
+ [ 6] = RCAR_GP_PIN(0, 0), /* D0 */
+ [ 7] = RCAR_GP_PIN(0, 1), /* D1 */
+ [ 8] = RCAR_GP_PIN(0, 2), /* D2 */
+ [ 9] = RCAR_GP_PIN(0, 3), /* D3 */
+ [10] = RCAR_GP_PIN(0, 4), /* D4 */
+ [11] = RCAR_GP_PIN(0, 5), /* D5 */
+ [12] = RCAR_GP_PIN(0, 6), /* D6 */
+ [13] = RCAR_GP_PIN(0, 7), /* D7 */
+ [14] = RCAR_GP_PIN(0, 8), /* D8 */
+ [15] = RCAR_GP_PIN(0, 9), /* D9 */
+ [16] = RCAR_GP_PIN(0, 10), /* D10 */
+ [17] = RCAR_GP_PIN(0, 11), /* D11 */
+ [18] = RCAR_GP_PIN(0, 12), /* D12 */
+ [19] = RCAR_GP_PIN(0, 13), /* D13 */
+ [20] = RCAR_GP_PIN(0, 14), /* D14 */
+ [21] = RCAR_GP_PIN(0, 15), /* D15 */
+ [22] = RCAR_GP_PIN(0, 16), /* A0 */
+ [23] = RCAR_GP_PIN(0, 17), /* A1 */
+ [24] = RCAR_GP_PIN(0, 18), /* A2 */
+ [25] = RCAR_GP_PIN(0, 19), /* A3 */
+ [26] = RCAR_GP_PIN(0, 20), /* A4 */
+ [27] = RCAR_GP_PIN(0, 21), /* A5 */
+ [28] = RCAR_GP_PIN(0, 22), /* A6 */
+ [29] = RCAR_GP_PIN(0, 23), /* A7 */
+ [30] = RCAR_GP_PIN(0, 24), /* A8 */
+ [31] = RCAR_GP_PIN(0, 25), /* A9 */
+ } },
+ { PINMUX_BIAS_REG("PUPR1", 0xe6060104, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 26), /* A10 */
+ [ 1] = RCAR_GP_PIN(0, 27), /* A11 */
+ [ 2] = RCAR_GP_PIN(0, 28), /* A12 */
+ [ 3] = RCAR_GP_PIN(0, 29), /* A13 */
+ [ 4] = RCAR_GP_PIN(0, 30), /* A14 */
+ [ 5] = RCAR_GP_PIN(0, 31), /* A15 */
+ [ 6] = RCAR_GP_PIN(1, 0), /* A16 */
+ [ 7] = RCAR_GP_PIN(1, 1), /* A17 */
+ [ 8] = RCAR_GP_PIN(1, 2), /* A18 */
+ [ 9] = RCAR_GP_PIN(1, 3), /* A19 */
+ [10] = PIN_TRST_N, /* TRST# */
+ [11] = PIN_TCK, /* TCK */
+ [12] = PIN_TMS, /* TMS */
+ [13] = PIN_TDI, /* TDI */
+ [14] = RCAR_GP_PIN(1, 11), /* CS1#/A26 */
+ [15] = RCAR_GP_PIN(1, 12), /* EX_CS0# */
+ [16] = RCAR_GP_PIN(1, 13), /* EX_CS1# */
+ [17] = RCAR_GP_PIN(1, 14), /* EX_CS2# */
+ [18] = RCAR_GP_PIN(1, 15), /* EX_CS3# */
+ [19] = RCAR_GP_PIN(1, 16), /* EX_CS4# */
+ [20] = RCAR_GP_PIN(1, 17), /* EX_CS5# */
+ [21] = RCAR_GP_PIN(1, 18), /* BS# */
+ [22] = RCAR_GP_PIN(1, 19), /* RD# */
+ [23] = RCAR_GP_PIN(1, 20), /* RD/WR# */
+ [24] = RCAR_GP_PIN(1, 21), /* WE0# */
+ [25] = RCAR_GP_PIN(1, 22), /* WE1# */
+ [26] = RCAR_GP_PIN(1, 23), /* EX_WAIT0 */
+ [27] = RCAR_GP_PIN(1, 24), /* DREQ0 */
+ [28] = RCAR_GP_PIN(1, 25), /* DACK0 */
+ [29] = RCAR_GP_PIN(5, 31), /* SPEEDIN */
+ [30] = RCAR_GP_PIN(2, 0), /* SSI_SCK0129 */
+ [31] = RCAR_GP_PIN(2, 1), /* SSI_WS0129 */
+ } },
+ { PINMUX_BIAS_REG("PUPR2", 0xe6060108, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(2, 2), /* SSI_SDATA0 */
+ [ 1] = RCAR_GP_PIN(2, 3), /* SSI_SCK1 */
+ [ 2] = RCAR_GP_PIN(2, 4), /* SSI_WS1 */
+ [ 3] = RCAR_GP_PIN(2, 5), /* SSI_SDATA1 */
+ [ 4] = RCAR_GP_PIN(2, 6), /* SSI_SCK2 */
+ [ 5] = RCAR_GP_PIN(2, 7), /* SSI_WS2 */
+ [ 6] = RCAR_GP_PIN(2, 8), /* SSI_SDATA2 */
+ [ 7] = RCAR_GP_PIN(2, 9), /* SSI_SCK34 */
+ [ 8] = RCAR_GP_PIN(2, 10), /* SSI_WS34 */
+ [ 9] = RCAR_GP_PIN(2, 11), /* SSI_SDATA3 */
+ [10] = RCAR_GP_PIN(2, 12), /* SSI_SCK4 */
+ [11] = RCAR_GP_PIN(2, 13), /* SSI_WS4 */
+ [12] = RCAR_GP_PIN(2, 14), /* SSI_SDATA4 */
+ [13] = RCAR_GP_PIN(2, 15), /* SSI_SCK5 */
+ [14] = RCAR_GP_PIN(2, 16), /* SSI_WS5 */
+ [15] = RCAR_GP_PIN(2, 17), /* SSI_SDATA5 */
+ [16] = RCAR_GP_PIN(2, 18), /* SSI_SCK6 */
+ [17] = RCAR_GP_PIN(2, 19), /* SSI_WS6 */
+ [18] = RCAR_GP_PIN(2, 20), /* SSI_SDATA6 */
+ [19] = RCAR_GP_PIN(2, 21), /* SSI_SCK78 */
+ [20] = RCAR_GP_PIN(2, 22), /* SSI_WS78 */
+ [21] = RCAR_GP_PIN(2, 23), /* SSI_SDATA7 */
+ [22] = RCAR_GP_PIN(2, 24), /* SSI_SDATA8 */
+ [23] = RCAR_GP_PIN(2, 25), /* SSI_SCK9 */
+ [24] = RCAR_GP_PIN(2, 26), /* SSI_WS9 */
+ [25] = RCAR_GP_PIN(2, 27), /* SSI_SDATA9 */
+ [26] = RCAR_GP_PIN(2, 28), /* AUDIO_CLKA */
+ [27] = RCAR_GP_PIN(2, 29), /* AUDIO_CLKB */
+ [28] = RCAR_GP_PIN(2, 30), /* AUDIO_CLKC */
+ [29] = RCAR_GP_PIN(2, 31), /* AUDIO_CLKOUT */
+ [30] = RCAR_GP_PIN(7, 10), /* IRQ0 */
+ [31] = RCAR_GP_PIN(7, 11), /* IRQ1 */
+ } },
+ { PINMUX_BIAS_REG("PUPR3", 0xe606010c, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(7, 12), /* IRQ2 */
+ [ 1] = RCAR_GP_PIN(7, 13), /* IRQ3 */
+ [ 2] = RCAR_GP_PIN(7, 14), /* IRQ4 */
+ [ 3] = RCAR_GP_PIN(7, 15), /* IRQ5 */
+ [ 4] = RCAR_GP_PIN(7, 16), /* IRQ6 */
+ [ 5] = RCAR_GP_PIN(7, 17), /* IRQ7 */
+ [ 6] = RCAR_GP_PIN(7, 18), /* IRQ8 */
+ [ 7] = RCAR_GP_PIN(7, 19), /* IRQ9 */
+ [ 8] = RCAR_GP_PIN(3, 0), /* DU1_DR0 */
+ [ 9] = RCAR_GP_PIN(3, 1), /* DU1_DR1 */
+ [10] = RCAR_GP_PIN(3, 2), /* DU1_DR2 */
+ [11] = RCAR_GP_PIN(3, 3), /* DU1_DR3 */
+ [12] = RCAR_GP_PIN(3, 4), /* DU1_DR4 */
+ [13] = RCAR_GP_PIN(3, 5), /* DU1_DR5 */
+ [14] = RCAR_GP_PIN(3, 6), /* DU1_DR6 */
+ [15] = RCAR_GP_PIN(3, 7), /* DU1_DR7 */
+ [16] = RCAR_GP_PIN(3, 8), /* DU1_DG0 */
+ [17] = RCAR_GP_PIN(3, 9), /* DU1_DG1 */
+ [18] = RCAR_GP_PIN(3, 10), /* DU1_DG2 */
+ [19] = RCAR_GP_PIN(3, 11), /* DU1_DG3 */
+ [20] = RCAR_GP_PIN(3, 12), /* DU1_DG4 */
+ [21] = RCAR_GP_PIN(3, 13), /* DU1_DG5 */
+ [22] = RCAR_GP_PIN(3, 14), /* DU1_DG6 */
+ [23] = RCAR_GP_PIN(3, 15), /* DU1_DG7 */
+ [24] = RCAR_GP_PIN(3, 16), /* DU1_DB0 */
+ [25] = RCAR_GP_PIN(3, 17), /* DU1_DB1 */
+ [26] = RCAR_GP_PIN(3, 18), /* DU1_DB2 */
+ [27] = RCAR_GP_PIN(3, 19), /* DU1_DB3 */
+ [28] = RCAR_GP_PIN(3, 20), /* DU1_DB4 */
+ [29] = RCAR_GP_PIN(3, 21), /* DU1_DB5 */
+ [30] = RCAR_GP_PIN(3, 22), /* DU1_DB6 */
+ [31] = RCAR_GP_PIN(3, 23), /* DU1_DB7 */
+ } },
+ { PINMUX_BIAS_REG("PUPR4", 0xe6060110, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 24), /* DU1_DOTCLKIN */
+ [ 1] = RCAR_GP_PIN(3, 25), /* DU1_DOTCLKOUT0 */
+ [ 2] = RCAR_GP_PIN(3, 26), /* DU1_DOTCLKOUT1 */
+ [ 3] = RCAR_GP_PIN(3, 27), /* DU1_EXHSYNC_DU1_HSYNC */
+ [ 4] = RCAR_GP_PIN(3, 28), /* DU1_EXVSYNC_DU1_VSYNC */
+ [ 5] = RCAR_GP_PIN(3, 29), /* DU1_EXODDF_DU1_ODDF_DISP_CDE */
+ [ 6] = RCAR_GP_PIN(3, 30), /* DU1_DISP */
+ [ 7] = RCAR_GP_PIN(3, 31), /* DU1_CDE */
+ [ 8] = RCAR_GP_PIN(4, 0), /* VI0_CLK */
+ [ 9] = RCAR_GP_PIN(4, 1), /* VI0_CLKENB */
+ [10] = RCAR_GP_PIN(4, 2), /* VI0_FIELD */
+ [11] = RCAR_GP_PIN(4, 3), /* VI0_HSYNC# */
+ [12] = RCAR_GP_PIN(4, 4), /* VI0_VSYNC# */
+ [13] = RCAR_GP_PIN(4, 5), /* VI0_DATA0_VI0_B0 */
+ [14] = RCAR_GP_PIN(4, 6), /* VI0_DATA1_VI0_B1 */
+ [15] = RCAR_GP_PIN(4, 7), /* VI0_DATA2_VI0_B2 */
+ [16] = RCAR_GP_PIN(4, 8), /* VI0_DATA3_VI0_B3 */
+ [17] = RCAR_GP_PIN(4, 9), /* VI0_DATA4_VI0_B4 */
+ [18] = RCAR_GP_PIN(4, 10), /* VI0_DATA5_VI0_B5 */
+ [19] = RCAR_GP_PIN(4, 11), /* VI0_DATA6_VI0_B6 */
+ [20] = RCAR_GP_PIN(4, 12), /* VI0_DATA7_VI0_B7 */
+ [21] = RCAR_GP_PIN(4, 13), /* VI0_G0 */
+ [22] = RCAR_GP_PIN(4, 14), /* VI0_G1 */
+ [23] = RCAR_GP_PIN(4, 15), /* VI0_G2 */
+ [24] = RCAR_GP_PIN(4, 16), /* VI0_G3 */
+ [25] = RCAR_GP_PIN(4, 17), /* VI0_G4 */
+ [26] = RCAR_GP_PIN(4, 18), /* VI0_G5 */
+ [27] = RCAR_GP_PIN(4, 19), /* VI0_G6 */
+ [28] = RCAR_GP_PIN(4, 20), /* VI0_G7 */
+ [29] = RCAR_GP_PIN(4, 21), /* VI0_R0 */
+ [30] = RCAR_GP_PIN(4, 22), /* VI0_R1 */
+ [31] = RCAR_GP_PIN(4, 23), /* VI0_R2 */
+ } },
+ { PINMUX_BIAS_REG("PUPR5", 0xe6060114, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(4, 24), /* VI0_R3 */
+ [ 1] = RCAR_GP_PIN(4, 25), /* VI0_R4 */
+ [ 2] = RCAR_GP_PIN(4, 26), /* VI0_R5 */
+ [ 3] = RCAR_GP_PIN(4, 27), /* VI0_R6 */
+ [ 4] = RCAR_GP_PIN(4, 28), /* VI0_R7 */
+ [ 5] = RCAR_GP_PIN(5, 0), /* VI1_HSYNC# */
+ [ 6] = RCAR_GP_PIN(5, 1), /* VI1_VSYNC# */
+ [ 7] = RCAR_GP_PIN(5, 2), /* VI1_CLKENB */
+ [ 8] = RCAR_GP_PIN(5, 3), /* VI1_FIELD */
+ [ 9] = RCAR_GP_PIN(5, 4), /* VI1_CLK */
+ [10] = RCAR_GP_PIN(5, 5), /* VI1_DATA0 */
+ [11] = RCAR_GP_PIN(5, 6), /* VI1_DATA1 */
+ [12] = RCAR_GP_PIN(5, 7), /* VI1_DATA2 */
+ [13] = RCAR_GP_PIN(5, 8), /* VI1_DATA3 */
+ [14] = RCAR_GP_PIN(5, 9), /* VI1_DATA4 */
+ [15] = RCAR_GP_PIN(5, 10), /* VI1_DATA5 */
+ [16] = RCAR_GP_PIN(5, 11), /* VI1_DATA6 */
+ [17] = RCAR_GP_PIN(5, 12), /* VI1_DATA7 */
+ [18] = RCAR_GP_PIN(5, 13), /* ETH_MDIO */
+ [19] = RCAR_GP_PIN(5, 14), /* ETH_CRS_DV */
+ [20] = RCAR_GP_PIN(5, 15), /* ETH_RX_ER */
+ [21] = RCAR_GP_PIN(5, 16), /* ETH_RXD0 */
+ [22] = RCAR_GP_PIN(5, 17), /* ETH_RXD1 */
+ [23] = RCAR_GP_PIN(5, 18), /* ETH_LINK */
+ [24] = RCAR_GP_PIN(5, 19), /* ETH_REFCLK */
+ [25] = RCAR_GP_PIN(5, 20), /* ETH_TXD1 */
+ [26] = RCAR_GP_PIN(5, 21), /* ETH_TX_EN */
+ [27] = RCAR_GP_PIN(5, 22), /* ETH_MAGIC */
+ [28] = RCAR_GP_PIN(5, 23), /* ETH_TXD0 */
+ [29] = RCAR_GP_PIN(5, 24), /* ETH_MDC */
+ [30] = RCAR_GP_PIN(5, 25), /* STP_IVCXO27_0 */
+ [31] = RCAR_GP_PIN(5, 26), /* STP_ISCLK_0 */
+ } },
+ { PINMUX_BIAS_REG("PUPR6", 0xe6060118, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(5, 27), /* STP_ISD_0 */
+ [ 1] = RCAR_GP_PIN(5, 28), /* STP_ISEN_0 */
+ [ 2] = RCAR_GP_PIN(5, 29), /* STP_ISSYNC_0 */
+ [ 3] = RCAR_GP_PIN(5, 30), /* STP_OPWM_0 */
+ [ 4] = RCAR_GP_PIN(6, 0), /* SD0_CLK */
+ [ 5] = RCAR_GP_PIN(6, 1), /* SD0_CMD */
+ [ 6] = RCAR_GP_PIN(6, 2), /* SD0_DATA0 */
+ [ 7] = RCAR_GP_PIN(6, 3), /* SD0_DATA1 */
+ [ 8] = RCAR_GP_PIN(6, 4), /* SD0_DATA2 */
+ [ 9] = RCAR_GP_PIN(6, 5), /* SD0_DATA3 */
+ [10] = RCAR_GP_PIN(6, 6), /* SD0_CD */
+ [11] = RCAR_GP_PIN(6, 7), /* SD0_WP */
+ [12] = RCAR_GP_PIN(6, 8), /* SD2_CLK */
+ [13] = RCAR_GP_PIN(6, 9), /* SD2_CMD */
+ [14] = RCAR_GP_PIN(6, 10), /* SD2_DATA0 */
+ [15] = RCAR_GP_PIN(6, 11), /* SD2_DATA1 */
+ [16] = RCAR_GP_PIN(6, 12), /* SD2_DATA2 */
+ [17] = RCAR_GP_PIN(6, 13), /* SD2_DATA3 */
+ [18] = RCAR_GP_PIN(6, 14), /* SD2_CD */
+ [19] = RCAR_GP_PIN(6, 15), /* SD2_WP */
+ [20] = RCAR_GP_PIN(6, 16), /* SD3_CLK */
+ [21] = RCAR_GP_PIN(6, 17), /* SD3_CMD */
+ [22] = RCAR_GP_PIN(6, 18), /* SD3_DATA0 */
+ [23] = RCAR_GP_PIN(6, 19), /* SD3_DATA1 */
+ [24] = RCAR_GP_PIN(6, 20), /* SD3_DATA2 */
+ [25] = RCAR_GP_PIN(6, 21), /* SD3_DATA3 */
+ [26] = RCAR_GP_PIN(6, 22), /* SD3_CD */
+ [27] = RCAR_GP_PIN(6, 23), /* SD3_WP */
+ [28] = RCAR_GP_PIN(6, 24), /* MSIOF0_SCK */
+ [29] = RCAR_GP_PIN(6, 25), /* MSIOF0_SYNC */
+ [30] = RCAR_GP_PIN(6, 26), /* MSIOF0_TXD */
+ [31] = RCAR_GP_PIN(6, 27), /* MSIOF0_RXD */
+ } },
+ { PINMUX_BIAS_REG("PUPR7", 0xe606011c, "N/A", 0) {
+ /* PUPR7 pull-up pins */
+ [ 0] = RCAR_GP_PIN(6, 28), /* MSIOF0_SS1 */
+ [ 1] = RCAR_GP_PIN(6, 29), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(4, 29), /* SIM0_RST */
+ [ 3] = RCAR_GP_PIN(4, 30), /* SIM0_CLK */
+ [ 4] = RCAR_GP_PIN(4, 31), /* SIM0_D */
+ [ 5] = RCAR_GP_PIN(7, 20), /* GPS_CLK */
+ [ 6] = RCAR_GP_PIN(7, 21), /* GPS_SIGN */
+ [ 7] = RCAR_GP_PIN(7, 22), /* GPS_MAG */
+ [ 8] = RCAR_GP_PIN(7, 0), /* HCTS0# */
+ [ 9] = RCAR_GP_PIN(7, 1), /* HRTS0# */
+ [10] = RCAR_GP_PIN(7, 2), /* HSCK0 */
+ [11] = RCAR_GP_PIN(7, 3), /* HRX0 */
+ [12] = RCAR_GP_PIN(7, 4), /* HTX0 */
+ [13] = RCAR_GP_PIN(7, 5), /* HRX1 */
+ [14] = RCAR_GP_PIN(7, 6), /* HTX1 */
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = RCAR_GP_PIN(1, 9), /* A25 */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = RCAR_GP_PIN(1, 10), /* CS0# */
+ [21] = RCAR_GP_PIN(7, 23), /* USB0_PWEN */
+ [22] = RCAR_GP_PIN(7, 24), /* USB0_OVC */
+ [23] = RCAR_GP_PIN(7, 25), /* USB1_PWEN */
+ [24] = RCAR_GP_PIN(6, 30), /* USB1_OVC */
+ [25] = PIN_AVS1, /* AVS1 */
+ [26] = PIN_AVS2, /* AVS2 */
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("N/A", 0, "PUPR7", 0xe606011c) {
+ /* PUPR7 pull-down pins */
+ [ 0] = SH_PFC_PIN_NONE,
+ [ 1] = SH_PFC_PIN_NONE,
+ [ 2] = SH_PFC_PIN_NONE,
+ [ 3] = SH_PFC_PIN_NONE,
+ [ 4] = SH_PFC_PIN_NONE,
+ [ 5] = SH_PFC_PIN_NONE,
+ [ 6] = SH_PFC_PIN_NONE,
+ [ 7] = SH_PFC_PIN_NONE,
+ [ 8] = SH_PFC_PIN_NONE,
+ [ 9] = SH_PFC_PIN_NONE,
+ [10] = SH_PFC_PIN_NONE,
+ [11] = SH_PFC_PIN_NONE,
+ [12] = SH_PFC_PIN_NONE,
+ [13] = SH_PFC_PIN_NONE,
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = PIN_ASEBRK_N_ACK, /* ASEBRK#/ACK */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
static const struct sh_pfc_soc_operations r8a7791_pinmux_ops = {
.pin_to_pocctrl = r8a7791_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
};
#ifdef CONFIG_PINCTRL_PFC_R8A7743
@@ -6665,6 +7016,7 @@ const struct sh_pfc_soc_info r8a7743_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions.common),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
@@ -6687,6 +7039,7 @@ const struct sh_pfc_soc_info r8a7744_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions.common),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
@@ -6711,6 +7064,7 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
@@ -6735,6 +7089,7 @@ const struct sh_pfc_soc_info r8a7793_pinmux_info = {
ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/renesas/pfc-r8a7792.c b/drivers/pinctrl/renesas/pfc-r8a7792.c
index 258f82fb31c0..f54a7c81005d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7792.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7792.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 32fe8caca70a..ee4ce9349aae 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
@@ -8,7 +8,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
index bdd605e41303..be4eee070842 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77951.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/sys_soc.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -4126,6 +4125,18 @@ static const union vin_data vin4_data_b_mux = {
VI4_DATA22_MARK, VI4_DATA23_MARK,
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
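
The new vin4_g8 group relies on positional pairing: SH_PFC_PIN_GROUP(vin4_g8) stitches the two arrays together so that the i-th pin carries the i-th mux mark. A sketch of the descriptor the macro is assumed to generate (field names per struct sh_pfc_pin_group; illustration only, not the macro's literal output):

	{
		.name = "vin4_g8",
		.pins = vin4_g8_pins,
		.mux = vin4_g8_mux,
		.nr_pins = ARRAY_SIZE(vin4_g8_pins),
	},

Keeping both arrays the same length is therefore a hard requirement, not a convention; the same holds for the vin5_high8 pair added below.
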
static const unsigned int vin4_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
@@ -4180,6 +4191,18 @@ static const union vin_data16 vin5_data_mux = {
VI5_DATA14_MARK, VI5_DATA15_MARK,
},
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
static const unsigned int vin5_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
@@ -4210,7 +4233,7 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[326];
+ struct sh_pfc_pin_group common[328];
#ifdef CONFIG_PINCTRL_PFC_R8A77951
struct sh_pfc_pin_group automotive[30];
#endif
@@ -4530,6 +4553,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4538,6 +4562,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 10),
VIN_DATA_PIN_GROUP(vin5_data, 12),
VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
@@ -5097,6 +5122,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -5108,6 +5134,7 @@ static const char * const vin5_groups[] = {
"vin5_data10",
"vin5_data12",
"vin5_data16",
+ "vin5_high8",
"vin5_sync",
"vin5_field",
"vin5_clkenb",
diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
index 96b5b1509bb7..44e9d2eea484 100644
--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -4100,6 +4099,18 @@ static const union vin_data vin4_data_b_mux = {
VI4_DATA22_MARK, VI4_DATA23_MARK,
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
static const unsigned int vin4_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
@@ -4154,6 +4165,18 @@ static const union vin_data16 vin5_data_mux = {
VI5_DATA14_MARK, VI5_DATA15_MARK,
},
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
static const unsigned int vin5_sync_pins[] = {
/* HSYNC#, VSYNC# */
RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
@@ -4184,7 +4207,7 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[322];
+ struct sh_pfc_pin_group common[324];
#if defined(CONFIG_PINCTRL_PFC_R8A77960) || defined(CONFIG_PINCTRL_PFC_R8A77961)
struct sh_pfc_pin_group automotive[30];
#endif
@@ -4500,6 +4523,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4508,6 +4532,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 10),
VIN_DATA_PIN_GROUP(vin5_data, 12),
VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
@@ -5054,6 +5079,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -5065,6 +5091,7 @@ static const char * const vin5_groups[] = {
"vin5_data10",
"vin5_data12",
"vin5_data16",
+ "vin5_high8",
"vin5_sync",
"vin5_field",
"vin5_clkenb",
diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
index f15e29383d9b..e69210cc6148 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77965.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -4337,6 +4336,20 @@ static const union vin_data vin4_data_b_mux = {
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
+
static const unsigned int vin4_sync_pins[] = {
/* VSYNC_N, HSYNC_N */
RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18),
@@ -4397,6 +4410,20 @@ static const union vin_data16 vin5_data_mux = {
},
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
+
static const unsigned int vin5_sync_pins[] = {
/* VSYNC_N, HSYNC_N */
RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 10),
@@ -4431,7 +4458,7 @@ static const unsigned int vin5_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[324];
+ struct sh_pfc_pin_group common[326];
#ifdef CONFIG_PINCTRL_PFC_R8A77965
struct sh_pfc_pin_group automotive[30];
#endif
@@ -4749,6 +4776,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4757,6 +4785,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 10),
VIN_DATA_PIN_GROUP(vin5_data, 12),
VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
@@ -5307,6 +5336,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -5318,6 +5348,7 @@ static const char * const vin5_groups[] = {
"vin5_data10",
"vin5_data12",
"vin5_data16",
+ "vin5_high8",
"vin5_sync",
"vin5_field",
"vin5_clkenb",
diff --git a/drivers/pinctrl/renesas/pfc-r8a77970.c b/drivers/pinctrl/renesas/pfc-r8a77970.c
index e8a0fc468eb2..7935826cfae7 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77970.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77970.c
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a77980.c b/drivers/pinctrl/renesas/pfc-r8a77980.c
index ebd07bebaeeb..20cff93a2a13 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77980.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77980.c
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
index 0a32e3c317c1..d040eb3e305d 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_PULL_UP_DOWN)
@@ -3697,6 +3696,20 @@ static const union vin_data vin4_data_b_mux = {
},
};
+static const unsigned int vin4_g8_pins[] = {
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 14),
+};
+
+static const unsigned int vin4_g8_mux[] = {
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+};
+
static const unsigned int vin4_sync_pins[] = {
/* HSYNC, VSYNC */
RCAR_GP_PIN(2, 25), RCAR_GP_PIN(2, 24),
@@ -3771,6 +3784,20 @@ static const unsigned int vin5_data8_b_mux[] = {
VI5_DATA6_B_MARK, VI5_DATA7_B_MARK,
};
+static const unsigned int vin5_high8_pins[] = {
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int vin5_high8_mux[] = {
+ VI5_DATA8_A_MARK, VI5_DATA9_A_MARK,
+ VI5_DATA10_A_MARK, VI5_DATA11_A_MARK,
+ VI5_DATA12_A_MARK, VI5_DATA13_A_MARK,
+ VI5_DATA14_A_MARK, VI5_DATA15_A_MARK,
+};
+
static const unsigned int vin5_sync_a_pins[] = {
/* HSYNC_N, VSYNC_N */
RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
@@ -3813,7 +3840,7 @@ static const unsigned int vin5_clk_b_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[253];
+ struct sh_pfc_pin_group common[255];
#ifdef CONFIG_PINCTRL_PFC_R8A77990
struct sh_pfc_pin_group automotive[21];
#endif
@@ -4058,6 +4085,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_data18_b),
VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_g8),
SH_PFC_PIN_GROUP(vin4_sync),
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
@@ -4067,6 +4095,7 @@ static const struct {
VIN_DATA_PIN_GROUP(vin5_data, 12, _a),
VIN_DATA_PIN_GROUP(vin5_data, 16, _a),
SH_PFC_PIN_GROUP(vin5_data8_b),
+ SH_PFC_PIN_GROUP(vin5_high8),
SH_PFC_PIN_GROUP(vin5_sync_a),
SH_PFC_PIN_GROUP(vin5_field_a),
SH_PFC_PIN_GROUP(vin5_clkenb_a),
@@ -4516,6 +4545,7 @@ static const char * const vin4_groups[] = {
"vin4_data18_b",
"vin4_data20_b",
"vin4_data24_b",
+ "vin4_g8",
"vin4_sync",
"vin4_field",
"vin4_clkenb",
@@ -4528,6 +4558,7 @@ static const char * const vin5_groups[] = {
"vin5_data12_a",
"vin5_data16_a",
"vin5_data8_b",
+ "vin5_high8",
"vin5_sync_a",
"vin5_field_a",
"vin5_clkenb_a",
diff --git a/drivers/pinctrl/renesas/pfc-r8a77995.c b/drivers/pinctrl/renesas/pfc-r8a77995.c
index 672251d86c2d..b479f87a3b23 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77995.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77995.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_GP(fn, sfx) \
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index 2250ccd0470a..ad6532443a78 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
diff --git a/drivers/pinctrl/renesas/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c
index 96b91e95b1e1..ed6db809e80d 100644
--- a/drivers/pinctrl/renesas/pfc-sh73a0.c
+++ b/drivers/pinctrl/renesas/pfc-sh73a0.c
@@ -13,7 +13,6 @@
#include <linux/regulator/machine.h>
#include <linux/slab.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
@@ -4310,50 +4309,14 @@ static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
* Pin bias
*/
-#define PORTnCR_PULMD_OFF (0 << 6)
-#define PORTnCR_PULMD_DOWN (2 << 6)
-#define PORTnCR_PULMD_UP (3 << 6)
-#define PORTnCR_PULMD_MASK (3 << 6)
-
static const unsigned int sh73a0_portcr_offsets[] = {
0x00000000, 0x00001000, 0x00001000, 0x00002000, 0x00002000,
0x00002000, 0x00002000, 0x00003000, 0x00003000, 0x00002000,
};
-static unsigned int sh73a0_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
+static void __iomem *sh73a0_pin_to_portcr(struct sh_pfc *pfc, unsigned int pin)
{
- void __iomem *addr = pfc->windows->virt
- + sh73a0_portcr_offsets[pin >> 5] + pin;
- u32 value = ioread8(addr) & PORTnCR_PULMD_MASK;
-
- switch (value) {
- case PORTnCR_PULMD_UP:
- return PIN_CONFIG_BIAS_PULL_UP;
- case PORTnCR_PULMD_DOWN:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case PORTnCR_PULMD_OFF:
- default:
- return PIN_CONFIG_BIAS_DISABLE;
- }
-}
-
-static void sh73a0_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
-{
- void __iomem *addr = pfc->windows->virt
- + sh73a0_portcr_offsets[pin >> 5] + pin;
- u32 value = ioread8(addr) & ~PORTnCR_PULMD_MASK;
-
- switch (bias) {
- case PIN_CONFIG_BIAS_PULL_UP:
- value |= PORTnCR_PULMD_UP;
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- value |= PORTnCR_PULMD_DOWN;
- break;
- }
-
- iowrite8(value, addr);
+ return pfc->windows->virt + sh73a0_portcr_offsets[pin >> 5] + pin;
}
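
The helper keeps the indexing scheme of the deleted accessors: the offsets table is indexed by pin >> 5, so each entry is the block offset of one 32-pin window, and each PORTnCR register is one byte wide. A minimal stand-alone model of the address computation (userspace C, offsets copied from the table above):

#include <stdio.h>

static const unsigned int portcr_offsets[] = {
	0x00000000, 0x00001000, 0x00001000, 0x00002000, 0x00002000,
	0x00002000, 0x00002000, 0x00003000, 0x00003000, 0x00002000,
};

int main(void)
{
	unsigned int pin = 100;	/* any pin in window 3 (pins 96..127) */

	/* Mirrors sh73a0_pin_to_portcr(): window offset plus the pin
	 * number itself, since PORTnCR registers are byte-wide. */
	printf("PORT%uCR at base + 0x%x\n", pin,
	       portcr_offsets[pin >> 5] + pin);	/* prints 0x2064 */
	return 0;
}
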
/* -----------------------------------------------------------------------------
@@ -4383,8 +4346,9 @@ static int sh73a0_pinmux_soc_init(struct sh_pfc *pfc)
static const struct sh_pfc_soc_operations sh73a0_pfc_ops = {
.init = sh73a0_pinmux_soc_init,
- .get_bias = sh73a0_pinmux_get_bias,
- .set_bias = sh73a0_pinmux_set_bias,
+ .get_bias = rmobile_pinmux_get_bias,
+ .set_bias = rmobile_pinmux_set_bias,
+ .pin_to_portcr = sh73a0_pin_to_portcr,
};
const struct sh_pfc_soc_info sh73a0_pinmux_info = {
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index a49f74730272..bb488af29862 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
@@ -840,21 +841,48 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
return pinctrl_enable(pmx->pctl);
}
+static const struct pinmux_bias_reg *
+rcar_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit)
+{
+ unsigned int i, j;
+
+ for (i = 0; pfc->info->bias_regs[i].puen || pfc->info->bias_regs[i].pud; i++) {
+ for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
+ if (pfc->info->bias_regs[i].pins[j] == pin) {
+ *bit = j;
+ return &pfc->info->bias_regs[i];
+ }
+ }
+ }
+
+ WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
+
+ return NULL;
+}
+
unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ reg = rcar_pin_to_bias_reg(pfc, pin, &bit);
if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
- return PIN_CONFIG_BIAS_DISABLE;
- else if (!reg->pud || (sh_pfc_read(pfc, reg->pud) & BIT(bit)))
- return PIN_CONFIG_BIAS_PULL_UP;
- else
- return PIN_CONFIG_BIAS_PULL_DOWN;
+ if (reg->puen) {
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (!reg->pud || (sh_pfc_read(pfc, reg->pud) & BIT(bit)))
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ } else {
+ if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ else
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
}
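
rcar_pinmux_get_bias() now decodes two register shapes: the classic PUEN/PUD pair, and the PUD-only case added for the pull-down half of PUPR7 on r8a7791. A compilable userspace model of the same decision tree (a sketch; the enum stands in for the PIN_CONFIG_* values):

#include <stdint.h>
#include <stdio.h>

enum bias { BIAS_DISABLE, BIAS_PULL_UP, BIAS_PULL_DOWN };

static enum bias decode_bias(uint32_t puen, uint32_t pud,
			     int has_puen, int has_pud, unsigned int bit)
{
	if (has_puen) {
		if (!(puen & (1u << bit)))
			return BIAS_DISABLE;
		if (!has_pud || (pud & (1u << bit)))
			return BIAS_PULL_UP;
		return BIAS_PULL_DOWN;
	}
	/* PUD-only register, e.g. the pull-down view of PUPR7 */
	return (pud & (1u << bit)) ? BIAS_PULL_DOWN : BIAS_DISABLE;
}

int main(void)
{
	/* PUPR-style pull-up register with no PUD: bit set = pull-up */
	printf("%d\n", decode_bias(0x4, 0, 1, 0, 2));	/* 1 */
	/* pull-down-only view: bit set = pull-down */
	printf("%d\n", decode_bias(0, 0x1, 0, 1, 0));	/* 2 */
	return 0;
}

The symmetric write-out in rcar_pinmux_set_bias() below takes the same split on reg->puen.
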
void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
@@ -864,21 +892,68 @@ void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
u32 enable, updown;
unsigned int bit;
- reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ reg = rcar_pin_to_bias_reg(pfc, pin, &bit);
if (!reg)
return;
- enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
- if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= BIT(bit);
+ if (reg->puen) {
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= BIT(bit);
- if (reg->pud) {
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
+ if (reg->pud) {
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
- sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->pud, updown);
+ }
+
+ sh_pfc_write(pfc, reg->puen, enable);
+ } else {
+ enable = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_DOWN)
+ enable |= BIT(bit);
+
+ sh_pfc_write(pfc, reg->pud, enable);
+ }
+}
+
+#define PORTnCR_PULMD_OFF (0 << 6)
+#define PORTnCR_PULMD_DOWN (2 << 6)
+#define PORTnCR_PULMD_UP (3 << 6)
+#define PORTnCR_PULMD_MASK (3 << 6)
+
+unsigned int rmobile_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin)
+{
+ void __iomem *reg = pfc->info->ops->pin_to_portcr(pfc, pin);
+ u32 value = ioread8(reg) & PORTnCR_PULMD_MASK;
+
+ switch (value) {
+ case PORTnCR_PULMD_UP:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case PORTnCR_PULMD_DOWN:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ case PORTnCR_PULMD_OFF:
+ default:
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
+}
+
+void rmobile_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ void __iomem *reg = pfc->info->ops->pin_to_portcr(pfc, pin);
+ u32 value = ioread8(reg) & ~PORTnCR_PULMD_MASK;
+
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value |= PORTnCR_PULMD_UP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value |= PORTnCR_PULMD_DOWN;
+ break;
}
- sh_pfc_write(pfc, reg->puen, enable);
+ iowrite8(value, reg);
}
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 5934faeb23d7..320898861c4b 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -188,9 +188,9 @@ struct pinmux_drive_reg {
.reg = r, \
.fields =
-struct pinmux_bias_reg {
+struct pinmux_bias_reg { /* At least one of puen/pud must exist */
u32 puen; /* Pull-enable or pull-up control register */
- u32 pud; /* Pull-up/down control register (optional) */
+ u32 pud; /* Pull-up/down or pull-down control register */
const u16 pins[32];
};
@@ -273,6 +273,7 @@ struct sh_pfc_soc_operations {
void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias);
int (*pin_to_pocctrl)(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl);
+ void __iomem * (*pin_to_portcr)(struct sh_pfc *pfc, unsigned int pin);
};
struct sh_pfc_soc_info {
@@ -478,9 +479,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 5, fn, sfx, cfg)
#define PORT_GP_6(bank, fn, sfx) PORT_GP_CFG_6(bank, fn, sfx, 0)
-#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_7(bank, fn, sfx, cfg) \
PORT_GP_CFG_6(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 6, fn, sfx, cfg)
+#define PORT_GP_7(bank, fn, sfx) PORT_GP_CFG_7(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_7(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 7, fn, sfx, cfg)
#define PORT_GP_8(bank, fn, sfx) PORT_GP_CFG_8(bank, fn, sfx, 0)
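
PORT_GP_CFG_7() adds the missing step in the inductive macro chain, and PORT_GP_CFG_8() is re-expressed on top of it, so existing users are unchanged while the r8a7791 table can declare exactly pins 0..6 of bank 7. Its assumed full expansion, following the _6/_1 pattern visible above:

	PORT_GP_CFG_1(7, 0, fn, sfx, cfg), PORT_GP_CFG_1(7, 1, fn, sfx, cfg),
	PORT_GP_CFG_1(7, 2, fn, sfx, cfg), PORT_GP_CFG_1(7, 3, fn, sfx, cfg),
	PORT_GP_CFG_1(7, 4, fn, sfx, cfg), PORT_GP_CFG_1(7, 5, fn, sfx, cfg),
	PORT_GP_CFG_1(7, 6, fn, sfx, cfg)
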
@@ -773,4 +778,15 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
*/
#define RCAR_GP_PIN(bank, pin) (((bank) * 32) + (pin))
+/*
+ * Bias helpers
+ */
+unsigned int rcar_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
+void rcar_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias);
+
+unsigned int rmobile_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin);
+void rmobile_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias);
+
#endif /* __SH_PFC_H */
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 0cd7f33cdf25..2b99f4130e1e 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
- unsigned long mask;
+ unsigned int mask;
unsigned long flags;
raw_spin_lock_irqsave(&bank->slock, flags);
@@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
- unsigned long mask;
+ unsigned int mask;
unsigned long flags;
/*
@@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static inline void exynos_irq_demux_eint(unsigned long pend,
+static inline void exynos_irq_demux_eint(unsigned int pend,
struct irq_domain *domain)
{
unsigned int irq;
@@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
- unsigned long pend;
- unsigned long mask;
+ unsigned int pend;
+ unsigned int mask;
int i;
chained_irq_enter(chip, desc);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 7d9bdedcd71b..ad9eb5ed8e81 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -531,6 +531,8 @@ static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
break;
}
+ dev_err(pctl->dev, "invalid function %d on pin %d.\n", fnum, pin_num);
+
return false;
}
@@ -545,11 +547,8 @@ static int stm32_pctrl_dt_node_to_map_func(struct stm32_pinctrl *pctl,
(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
(*map)[*num_maps].data.mux.group = grp->name;
- if (!stm32_pctrl_is_function_valid(pctl, pin, fnum)) {
- dev_err(pctl->dev, "invalid function %d on pin %d .\n",
- fnum, pin);
+ if (!stm32_pctrl_is_function_valid(pctl, pin, fnum))
return -EINVAL;
- }
(*map)[*num_maps].data.mux.function = stm32_gpio_functions[fnum];
(*num_maps)++;
@@ -620,7 +619,6 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
func = STM32_GET_PIN_FUNC(pinfunc);
if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
- dev_err(pctl->dev, "invalid function.\n");
err = -EINVAL;
goto exit;
}
@@ -821,11 +819,8 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
int pin;
ret = stm32_pctrl_is_function_valid(pctl, g->pin, function);
- if (!ret) {
- dev_err(pctl->dev, "invalid function %d on group %d .\n",
- function, group);
+ if (!ret)
return -EINVAL;
- }
range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
if (!range) {
@@ -1542,8 +1537,10 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (of_property_read_bool(child, "gpio-controller")) {
bank->rstc = of_reset_control_get_exclusive(child,
NULL);
- if (PTR_ERR(bank->rstc) == -EPROBE_DEFER)
+ if (PTR_ERR(bank->rstc) == -EPROBE_DEFER) {
+ of_node_put(child);
return -EPROBE_DEFER;
+ }
bank->clk = of_clk_get_by_name(child, NULL);
if (IS_ERR(bank->clk)) {
@@ -1551,6 +1548,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
dev_err(dev,
"failed to get clk (%ld)\n",
PTR_ERR(bank->clk));
+ of_node_put(child);
return PTR_ERR(bank->clk);
}
i++;
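
The two added of_node_put() calls fix the standard leak: the child iterator used here (one of the for_each_*_child_of_node() helpers) holds a reference on the current child and only drops it when advancing, so any early return must drop it by hand. A toy refcount model of why (stand-alone C, names hypothetical):

#include <stdio.h>

struct node { const char *name; int refcount; };

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

int main(void)
{
	struct node child[3] = { { "a" }, { "b" }, { "c" } };
	int i;

	for (i = 0; i < 3; i++) {
		node_get(&child[i]);	/* iterator takes a reference... */
		if (i == 1)
			break;		/* early exit: reference still held */
		node_put(&child[i]);	/* ...and drops it when advancing */
	}
	if (i < 3)
		node_put(&child[i]);	/* the manual of_node_put() */

	for (i = 0; i < 3; i++)
		printf("%s refcount %d\n", child[i].name, child[i].refcount);
	return 0;			/* all counts back to 0 */
}
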
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 60a67139ff0a..4e2382778d38 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -511,8 +511,10 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
}
pins = devm_kcalloc(iod->dev, rows, sizeof(*pins), GFP_KERNEL);
- if (!pins)
+ if (!pins) {
+ error = -ENOMEM;
goto free_group;
+ }
cfg = devm_kcalloc(iod->dev, rows, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
@@ -867,7 +869,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
goto exit_out;
}
- if (ti_iodelay_pinconf_init_dev(iod))
+ ret = ti_iodelay_pinconf_init_dev(iod);
+ if (ret)
goto exit_out;
ret = ti_iodelay_alloc_pins(dev, iod, res->start);
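
Both ti-iodelay hunks fix the same bug class: an exit path taken with a stale status variable, so the probe could report success after a failure. Reduced to its essentials (a fragment only, names taken from the hunks above):

	pins = devm_kcalloc(iod->dev, rows, sizeof(*pins), GFP_KERNEL);
	if (!pins) {
		error = -ENOMEM;	/* was missing: 'error' still held 0 */
		goto free_group;
	}

	ret = ti_iodelay_pinconf_init_dev(iod);
	if (ret)			/* 'ret' now carries the real code */
		goto exit_out;
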
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 472a03daa869..4e14b4d6635d 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -52,12 +52,15 @@ struct i2c_peripheral {
enum i2c_adapter_type type;
u32 pci_devid;
+ const struct property_entry *properties;
+
struct i2c_client *client;
};
struct acpi_peripheral {
char hid[ACPI_ID_LEN];
- const struct property_entry *properties;
+ struct software_node swnode;
+ struct i2c_client *client;
};
struct chromeos_laptop {
@@ -68,7 +71,7 @@ struct chromeos_laptop {
struct i2c_peripheral *i2c_peripherals;
unsigned int num_i2c_peripherals;
- const struct acpi_peripheral *acpi_peripherals;
+ struct acpi_peripheral *acpi_peripherals;
unsigned int num_acpi_peripherals;
};
@@ -161,7 +164,7 @@ static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
static bool chromeos_laptop_adjust_client(struct i2c_client *client)
{
- const struct acpi_peripheral *acpi_dev;
+ struct acpi_peripheral *acpi_dev;
struct acpi_device_id acpi_ids[2] = { };
int i;
int error;
@@ -175,8 +178,7 @@ static bool chromeos_laptop_adjust_client(struct i2c_client *client)
memcpy(acpi_ids[0].id, acpi_dev->hid, ACPI_ID_LEN);
if (acpi_match_device(acpi_ids, &client->dev)) {
- error = device_add_properties(&client->dev,
- acpi_dev->properties);
+ error = device_add_software_node(&client->dev, &acpi_dev->swnode);
if (error) {
dev_err(&client->dev,
"failed to add properties: %d\n",
@@ -184,6 +186,8 @@ static bool chromeos_laptop_adjust_client(struct i2c_client *client)
break;
}
+ acpi_dev->client = client;
+
return true;
}
}
@@ -193,15 +197,28 @@ static bool chromeos_laptop_adjust_client(struct i2c_client *client)
static void chromeos_laptop_detach_i2c_client(struct i2c_client *client)
{
+ struct acpi_peripheral *acpi_dev;
struct i2c_peripheral *i2c_dev;
int i;
- for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ if (has_acpi_companion(&client->dev))
+ for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
+ acpi_dev = &cros_laptop->acpi_peripherals[i];
- if (i2c_dev->client == client)
- i2c_dev->client = NULL;
- }
+ if (acpi_dev->client == client) {
+ acpi_dev->client = NULL;
+ return;
+ }
+ }
+ else
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+ if (i2c_dev->client == client) {
+ i2c_dev->client = NULL;
+ return;
+ }
+ }
}
static int chromeos_laptop_i2c_notifier_call(struct notifier_block *nb,
@@ -302,28 +319,26 @@ static struct i2c_peripheral chromebook_pixel_peripherals[] __initdata = {
.board_info = {
I2C_BOARD_INFO("atmel_mxt_ts",
ATMEL_TS_I2C_ADDR),
- .properties =
- chromebook_atmel_touchscreen_props,
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "touchscreen",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_PANEL,
.alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ .properties = chromebook_atmel_touchscreen_props,
},
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("atmel_mxt_tp",
ATMEL_TP_I2C_ADDR),
- .properties =
- chromebook_pixel_trackpad_props,
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_VGADDC,
.alt_addr = ATMEL_TP_I2C_BL_ADDR,
+ .properties = chromebook_pixel_trackpad_props,
},
/* Light Sensor. */
{
@@ -414,8 +429,6 @@ static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
.board_info = {
I2C_BOARD_INFO("atmel_mxt_ts",
ATMEL_TS_I2C_ADDR),
- .properties =
- chromebook_atmel_touchscreen_props,
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "touchscreen",
@@ -423,6 +436,7 @@ static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
.type = I2C_ADAPTER_DESIGNWARE,
.pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
.alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ .properties = chromebook_atmel_touchscreen_props,
},
/* Touchpad. */
{
@@ -498,12 +512,16 @@ static struct acpi_peripheral samus_peripherals[] __initdata = {
/* Touchpad */
{
.hid = "ATML0000",
- .properties = samus_trackpad_props,
+ .swnode = {
+ .properties = samus_trackpad_props,
+ },
},
/* Touchscreen */
{
.hid = "ATML0001",
- .properties = chromebook_atmel_touchscreen_props,
+ .swnode = {
+ .properties = chromebook_atmel_touchscreen_props,
+ },
},
};
DECLARE_ACPI_CROS_LAPTOP(samus);
@@ -512,12 +530,16 @@ static struct acpi_peripheral generic_atmel_peripherals[] __initdata = {
/* Touchpad */
{
.hid = "ATML0000",
- .properties = chromebook_pixel_trackpad_props,
+ .swnode = {
+ .properties = chromebook_pixel_trackpad_props,
+ },
},
/* Touchscreen */
{
.hid = "ATML0001",
- .properties = chromebook_atmel_touchscreen_props,
+ .swnode = {
+ .properties = chromebook_atmel_touchscreen_props,
+ },
},
};
DECLARE_ACPI_CROS_LAPTOP(generic_atmel);
@@ -743,12 +765,11 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
if (error)
goto err_out;
- /* We need to deep-copy properties */
- if (info->properties) {
- info->properties =
- property_entries_dup(info->properties);
- if (IS_ERR(info->properties)) {
- error = PTR_ERR(info->properties);
+ /* Create primary fwnode for the device - copies everything */
+ if (i2c_dev->properties) {
+ info->fwnode = fwnode_create_software_node(i2c_dev->properties, NULL);
+ if (IS_ERR(info->fwnode)) {
+ error = PTR_ERR(info->fwnode);
goto err_out;
}
}
@@ -760,8 +781,8 @@ err_out:
while (--i >= 0) {
i2c_dev = &cros_laptop->i2c_peripherals[i];
info = &i2c_dev->board_info;
- if (info->properties)
- property_entries_free(info->properties);
+ if (!IS_ERR_OR_NULL(info->fwnode))
+ fwnode_remove_software_node(info->fwnode);
}
kfree(cros_laptop->i2c_peripherals);
return error;
@@ -801,11 +822,11 @@ chromeos_laptop_prepare_acpi_peripherals(struct chromeos_laptop *cros_laptop,
*acpi_dev = *src_dev;
/* We need to deep-copy properties */
- if (src_dev->properties) {
- acpi_dev->properties =
- property_entries_dup(src_dev->properties);
- if (IS_ERR(acpi_dev->properties)) {
- error = PTR_ERR(acpi_dev->properties);
+ if (src_dev->swnode.properties) {
+ acpi_dev->swnode.properties =
+ property_entries_dup(src_dev->swnode.properties);
+ if (IS_ERR(acpi_dev->swnode.properties)) {
+ error = PTR_ERR(acpi_dev->swnode.properties);
goto err_out;
}
}
@@ -821,8 +842,8 @@ chromeos_laptop_prepare_acpi_peripherals(struct chromeos_laptop *cros_laptop,
err_out:
while (--i >= 0) {
acpi_dev = &acpi_peripherals[i];
- if (acpi_dev->properties)
- property_entries_free(acpi_dev->properties);
+ if (!IS_ERR_OR_NULL(acpi_dev->swnode.properties))
+ property_entries_free(acpi_dev->swnode.properties);
}
kfree(acpi_peripherals);
@@ -833,21 +854,20 @@ static void chromeos_laptop_destroy(const struct chromeos_laptop *cros_laptop)
{
const struct acpi_peripheral *acpi_dev;
struct i2c_peripheral *i2c_dev;
- struct i2c_board_info *info;
int i;
for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
i2c_dev = &cros_laptop->i2c_peripherals[i];
- info = &i2c_dev->board_info;
-
i2c_unregister_device(i2c_dev->client);
- property_entries_free(info->properties);
}
for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
acpi_dev = &cros_laptop->acpi_peripherals[i];
- property_entries_free(acpi_dev->properties);
+ if (acpi_dev->client)
+ device_remove_software_node(&acpi_dev->client->dev);
+
+ property_entries_free(acpi_dev->swnode.properties);
}
kfree(cros_laptop->i2c_peripherals);
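
The driver-wide conversion replaces bare property_entry arrays with software nodes: ACPI-matched clients get a struct software_node attached via device_add_software_node() (and detached with device_remove_software_node() in the destroy path above), while board-info peripherals hand over a software-node-backed fwnode before the device even exists. A minimal sketch of both attachment patterns (property name and value hypothetical):

#include <linux/property.h>

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("hid-descr-addr", 0x20),	/* hypothetical */
	{ }
};

/* The node wraps the property array; devices now point at the node. */
static const struct software_node demo_node = {
	.properties = demo_props,
};

/* 1) Device already instantiated (the ACPI path above):
 *	error = device_add_software_node(&client->dev, &demo_node);
 *
 * 2) Device still being declared (the i2c_board_info path):
 *	info->fwnode = fwnode_create_software_node(demo_props, NULL);
 */

The same swnode-in-board-info pattern shows up in the intel_cht_int33fe change that follows.
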
diff --git a/drivers/platform/x86/intel_cht_int33fe_microb.c b/drivers/platform/x86/intel_cht_int33fe_microb.c
index 20b11e0d9a75..673f41cd14b5 100644
--- a/drivers/platform/x86/intel_cht_int33fe_microb.c
+++ b/drivers/platform/x86/intel_cht_int33fe_microb.c
@@ -35,6 +35,10 @@ static const struct property_entry bq27xxx_props[] = {
{ }
};
+static const struct software_node bq27xxx_node = {
+ .properties = bq27xxx_props,
+};
+
int cht_int33fe_microb_probe(struct cht_int33fe_data *data)
{
struct device *dev = data->dev;
@@ -43,7 +47,7 @@ int cht_int33fe_microb_probe(struct cht_int33fe_data *data)
memset(&board_info, 0, sizeof(board_info));
strscpy(board_info.type, "bq27542", ARRAY_SIZE(board_info.type));
board_info.dev_name = "bq27542";
- board_info.properties = bq27xxx_props;
+ board_info.swnode = &bq27xxx_node;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
return PTR_ERR_OR_ZERO(data->battery_fg);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index f2edef0df40f..8c20e524e9ad 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -108,7 +108,7 @@ config PTP_1588_CLOCK_PCH
config PTP_1588_CLOCK_KVM
tristate "KVM virtual PTP clock"
depends on PTP_1588_CLOCK
- depends on KVM_GUEST && X86
+ depends on (KVM_GUEST && X86) || (HAVE_ARM_SMCCC_DISCOVERY && ARM_ARCH_TIMER)
default y
help
This driver adds support for using kvm infrastructure as a PTP
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index db5aef3bddc6..8673d1743faa 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -4,6 +4,8 @@
#
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o
+ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
diff --git a/drivers/ptp/ptp_kvm_arm.c b/drivers/ptp/ptp_kvm_arm.c
new file mode 100644
index 000000000000..b7d28c8dfb84
--- /dev/null
+++ b/drivers/ptp/ptp_kvm_arm.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ * Copyright (C) 2019 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/ptp_kvm.h>
+
+#include <asm/arch_timer.h>
+#include <asm/hypervisor.h>
+
+int kvm_arch_ptp_init(void)
+{
+ int ret;
+
+ ret = kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP);
+ if (ret <= 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+{
+ return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL);
+}
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm_common.c
index 658d33fc3195..fcae32f56f25 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm_common.c
@@ -8,11 +8,11 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/ptp_kvm.h>
#include <uapi/linux/kvm_para.h>
#include <asm/kvm_para.h>
-#include <asm/pvclock.h>
-#include <asm/kvmclock.h>
#include <uapi/asm/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
@@ -24,56 +24,29 @@ struct kvm_ptp_clock {
static DEFINE_SPINLOCK(kvm_ptp_lock);
-static struct pvclock_vsyscall_time_info *hv_clock;
-
-static struct kvm_clock_pairing clock_pair;
-static phys_addr_t clock_pair_gpa;
-
static int ptp_kvm_get_time_fn(ktime_t *device_time,
struct system_counterval_t *system_counter,
void *ctx)
{
- unsigned long ret;
+ long ret;
+ u64 cycle;
struct timespec64 tspec;
- unsigned version;
- int cpu;
- struct pvclock_vcpu_time_info *src;
+ struct clocksource *cs;
spin_lock(&kvm_ptp_lock);
preempt_disable_notrace();
- cpu = smp_processor_id();
- src = &hv_clock[cpu].pvti;
-
- do {
- /*
- * We are using a TSC value read in the hosts
- * kvm_hc_clock_pairing handling.
- * So any changes to tsc_to_system_mul
- * and tsc_shift or any other pvclock
- * data invalidate that measurement.
- */
- version = pvclock_read_begin(src);
-
- ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
- clock_pair_gpa,
- KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret != 0) {
- pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
- spin_unlock(&kvm_ptp_lock);
- preempt_enable_notrace();
- return -EOPNOTSUPP;
- }
-
- tspec.tv_sec = clock_pair.sec;
- tspec.tv_nsec = clock_pair.nsec;
- ret = __pvclock_read_cycles(src, clock_pair.tsc);
- } while (pvclock_read_retry(src, version));
+ ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs);
+ if (ret) {
+ spin_unlock(&kvm_ptp_lock);
+ preempt_enable_notrace();
+ return ret;
+ }
preempt_enable_notrace();
- system_counter->cycles = ret;
- system_counter->cs = &kvm_clock;
+ system_counter->cycles = cycle;
+ system_counter->cs = cs;
*device_time = timespec64_to_ktime(tspec);
@@ -111,22 +84,17 @@ static int ptp_kvm_settime(struct ptp_clock_info *ptp,
static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- unsigned long ret;
+ long ret;
struct timespec64 tspec;
spin_lock(&kvm_ptp_lock);
- ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
- clock_pair_gpa,
- KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret != 0) {
- pr_err_ratelimited("clock offset hypercall ret %lu\n", ret);
+ ret = kvm_arch_ptp_get_clock(&tspec);
+ if (ret) {
spin_unlock(&kvm_ptp_lock);
- return -EOPNOTSUPP;
+ return ret;
}
- tspec.tv_sec = clock_pair.sec;
- tspec.tv_nsec = clock_pair.nsec;
spin_unlock(&kvm_ptp_lock);
memcpy(ts, &tspec, sizeof(struct timespec64));
@@ -168,19 +136,12 @@ static int __init ptp_kvm_init(void)
{
long ret;
- if (!kvm_para_available())
- return -ENODEV;
-
- clock_pair_gpa = slow_virt_to_phys(&clock_pair);
- hv_clock = pvclock_get_pvti_cpu0_va();
-
- if (!hv_clock)
- return -ENODEV;
-
- ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
- KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
- return -ENODEV;
+ ret = kvm_arch_ptp_init();
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ pr_err("fail to initialize ptp_kvm");
+ return ret;
+ }
kvm_ptp_clock.caps = ptp_kvm_caps;
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
new file mode 100644
index 000000000000..3dd519dfc473
--- /dev/null
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <asm/pvclock.h>
+#include <asm/kvmclock.h>
+#include <linux/module.h>
+#include <uapi/asm/kvm_para.h>
+#include <uapi/linux/kvm_para.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_kvm.h>
+
+struct pvclock_vsyscall_time_info *hv_clock;
+
+static phys_addr_t clock_pair_gpa;
+static struct kvm_clock_pairing clock_pair;
+
+int kvm_arch_ptp_init(void)
+{
+ long ret;
+
+ if (!kvm_para_available())
+ return -ENODEV;
+
+ clock_pair_gpa = slow_virt_to_phys(&clock_pair);
+ hv_clock = pvclock_get_pvti_cpu0_va();
+ if (!hv_clock)
+ return -ENODEV;
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+ return -ENODEV;
+
+ return 0;
+}
+
+int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+{
+ long ret;
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+ clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret != 0) {
+ pr_err_ratelimited("clock offset hypercall ret %lu\n", ret);
+ return -EOPNOTSUPP;
+ }
+
+ ts->tv_sec = clock_pair.sec;
+ ts->tv_nsec = clock_pair.nsec;
+
+ return 0;
+}
+
+int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
+ struct clocksource **cs)
+{
+ struct pvclock_vcpu_time_info *src;
+ unsigned int version;
+ long ret;
+ int cpu;
+
+ cpu = smp_processor_id();
+ src = &hv_clock[cpu].pvti;
+
+ do {
+ /*
+ * We are using a TSC value read in the host's
+ * kvm_hc_clock_pairing handling.
+ * So any changes to tsc_to_system_mul
+ * and tsc_shift or any other pvclock
+ * data invalidate that measurement.
+ */
+ version = pvclock_read_begin(src);
+
+ ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+ clock_pair_gpa,
+ KVM_CLOCK_PAIRING_WALLCLOCK);
+ if (ret != 0) {
+ pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret);
+ return -EOPNOTSUPP;
+ }
+ tspec->tv_sec = clock_pair.sec;
+ tspec->tv_nsec = clock_pair.nsec;
+ *cycle = __pvclock_read_cycles(src, clock_pair.tsc);
+ } while (pvclock_read_retry(src, version));
+
+ *cs = &kvm_clock;
+
+ return 0;
+}
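
After the split, ptp_kvm_common.c is architecture-neutral; x86 (above) and arm (earlier in this patch) each provide just three hooks. The contract, written out as the stub a hypothetical third architecture would fill in (a sketch; error conventions per the common-code callers):

#include <linux/clocksource.h>
#include <linux/ptp_kvm.h>

/* Probe the host service once at module init; -EOPNOTSUPP is the
 * "quietly absent" case that ptp_kvm_init() does not log. */
int kvm_arch_ptp_init(void)
{
	return -EOPNOTSUPP;
}

/* Fill *ts with the host wall-clock time (the .gettime path). */
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

/* Return the host time, the matching guest counter value, and the
 * clocksource that value belongs to, sampled together (the
 * cross-timestamping path used by ptp_kvm_get_time_fn()). */
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
				 struct clocksource **cs)
{
	return -EOPNOTSUPP;
}
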
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index d3371ac7b871..c76adedd58c9 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -618,6 +618,15 @@ config PWM_TWL_LED
To compile this driver as a module, choose M here: the module
will be called pwm-twl-led.
+config PWM_VISCONTI
+ tristate "Toshiba Visconti PWM support"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ help
+ PWM Subsystem driver support for Toshiba Visconti SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-visconti.
+
config PWM_VT8500
tristate "vt8500 PWM support"
depends on ARCH_VT8500 || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index d3879619bd76..708840b7fba8 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -58,4 +58,5 @@ obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
+obj-$(CONFIG_PWM_VISCONTI) += pwm-visconti.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a8eff4b3ee36..c4d5c0667137 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -37,23 +37,13 @@ static struct pwm_device *pwm_to_device(unsigned int pwm)
return radix_tree_lookup(&pwm_tree, pwm);
}
-static int alloc_pwms(int pwm, unsigned int count)
+static int alloc_pwms(unsigned int count)
{
- unsigned int from = 0;
unsigned int start;
- if (pwm >= MAX_PWMS)
- return -EINVAL;
-
- if (pwm >= 0)
- from = pwm;
-
- start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from,
+ start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, 0,
count, 0);
- if (pwm >= 0 && start != pwm)
- return -EEXIST;
-
if (start + count > MAX_PWMS)
return -ENOSPC;
@@ -260,18 +250,14 @@ static bool pwm_ops_check(const struct pwm_chip *chip)
}
/**
- * pwmchip_add_with_polarity() - register a new PWM chip
+ * pwmchip_add() - register a new PWM chip
* @chip: the PWM chip to add
- * @polarity: initial polarity of PWM channels
*
- * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
- * will be used. The initial polarity for all channels is specified by the
- * @polarity parameter.
+ * Register a new PWM chip.
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwmchip_add_with_polarity(struct pwm_chip *chip,
- enum pwm_polarity polarity)
+int pwmchip_add(struct pwm_chip *chip)
{
struct pwm_device *pwm;
unsigned int i;
@@ -285,25 +271,24 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
mutex_lock(&pwm_lock);
- ret = alloc_pwms(chip->base, chip->npwm);
+ ret = alloc_pwms(chip->npwm);
if (ret < 0)
goto out;
+ chip->base = ret;
+
chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
if (!chip->pwms) {
ret = -ENOMEM;
goto out;
}
- chip->base = ret;
-
for (i = 0; i < chip->npwm; i++) {
pwm = &chip->pwms[i];
pwm->chip = chip;
pwm->pwm = chip->base + i;
pwm->hwpwm = i;
- pwm->state.polarity = polarity;
radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
}
@@ -326,21 +311,6 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
-
-/**
- * pwmchip_add() - register a new PWM chip
- * @chip: the PWM chip to add
- *
- * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
- * will be used. The initial polarity for all channels is normal.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwmchip_add(struct pwm_chip *chip)
-{
- return pwmchip_add_with_polarity(chip, PWM_POLARITY_NORMAL);
-}
EXPORT_SYMBOL_GPL(pwmchip_add);
/**
@@ -607,7 +577,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
*/
if (state->polarity != pwm->state.polarity) {
if (!chip->ops->set_polarity)
- return -ENOTSUPP;
+ return -EINVAL;
/*
* Changing the polarity of a running PWM is
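With chip->base now always assigned by the core and pwmchip_add_with_polarity() folded into pwmchip_add(), a driver's probe reduces to filling in dev, ops and npwm. A minimal sketch under that contract (the foo_* names are hypothetical):

    static int foo_pwm_probe(struct platform_device *pdev)
    {
            struct foo_pwm *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->chip.dev = &pdev->dev;
            priv->chip.ops = &foo_pwm_ops;  /* must provide .apply */
            priv->chip.npwm = 2;
            /* no chip->base = -1: the core now always allocates the base */

            return pwmchip_add(&priv->chip);
    }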
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 58c6c0f5b0ec..e2a26d9da25b 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -24,23 +24,37 @@ struct ab8500_pwm_chip {
struct pwm_chip chip;
};
-static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
- int ret = 0;
- unsigned int higher_val, lower_val;
+ int ret;
u8 reg;
+ unsigned int higher_val, lower_val;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ ret = abx500_mask_and_set_register_interruptible(chip->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (chip->base - 1), 0);
+
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
+ pwm->label, ret);
+ return ret;
+ }
/*
* get the first 8 bits that are to be written to
* AB8500_PWM_OUT_CTRL1_REG[0:7]
*/
- lower_val = duty_ns & 0x00FF;
+ lower_val = state->duty_cycle & 0x00FF;
/*
* get bits [9:10] that are to be written to
* AB8500_PWM_OUT_CTRL2_REG[0:1]
*/
- higher_val = ((duty_ns & 0x0300) >> 8);
+ higher_val = ((state->duty_cycle & 0x0300) >> 8);
reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
@@ -48,15 +62,11 @@ static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
reg, (u8)lower_val);
if (ret < 0)
return ret;
+
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
(reg + 1), (u8)higher_val);
-
- return ret;
-}
-
-static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
+ if (ret < 0)
+ return ret;
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
@@ -64,25 +74,12 @@ static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
- return ret;
-}
-static void ab8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
-
- ret = abx500_mask_and_set_register_interruptible(chip->dev,
- AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 0);
- if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
- pwm->label, ret);
+ return ret;
}
static const struct pwm_ops ab8500_pwm_ops = {
- .config = ab8500_pwm_config,
- .enable = ab8500_pwm_enable,
- .disable = ab8500_pwm_disable,
+ .apply = ab8500_pwm_apply,
.owner = THIS_MODULE,
};
@@ -101,7 +98,6 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
- ab8500->chip.base = -1;
ab8500->chip.npwm = 1;
err = pwmchip_add(&ab8500->chip);
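The conversion above shows the pattern most drivers in this series follow when moving from .config/.enable/.disable to the atomic .apply callback: reject polarities the hardware cannot produce, take the disable branch first, then program the timing and enable. A condensed skeleton, assuming hypothetical foo_hw_* helpers:

    static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                             const struct pwm_state *state)
    {
            if (state->polarity != PWM_POLARITY_NORMAL)
                    return -EINVAL;         /* hardware has fixed polarity */

            if (!state->enabled)
                    return foo_hw_stop(chip, pwm);  /* disable path first */

            /* program period/duty, then make sure the output is running */
            foo_hw_set_timing(chip, pwm, state->duty_cycle, state->period);
            return foo_hw_start(chip, pwm);
    }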
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index dcbc0489dfd4..6ab597e54005 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -265,12 +265,11 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
chip->hlcdc = hlcdc;
chip->chip.ops = &atmel_hlcdc_pwm_ops;
chip->chip.dev = dev;
- chip->chip.base = -1;
chip->chip.npwm = 1;
chip->chip.of_xlate = of_pwm_xlate_with_flags;
chip->chip.of_pwm_n_cells = 3;
- ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
+ ret = pwmchip_add(&chip->chip);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 5ccc3e7420e9..8451d3e846be 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -362,20 +362,37 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
tcbpwm->div = i;
tcbpwm->duty = duty;
- /* If the PWM is enabled, call enable to apply the new conf */
- if (pwm_is_enabled(pwm))
- atmel_tcb_pwm_enable(chip, pwm);
-
return 0;
}
+static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int duty_cycle, period;
+ int ret;
+
+ /* This function only sets a flag in driver data */
+ atmel_tcb_pwm_set_polarity(chip, pwm, state->polarity);
+
+ if (!state->enabled) {
+ atmel_tcb_pwm_disable(chip, pwm);
+ return 0;
+ }
+
+ period = state->period < INT_MAX ? state->period : INT_MAX;
+ duty_cycle = state->duty_cycle < INT_MAX ? state->duty_cycle : INT_MAX;
+
+ ret = atmel_tcb_pwm_config(chip, pwm, duty_cycle, period);
+ if (ret)
+ return ret;
+
+ return atmel_tcb_pwm_enable(chip, pwm);
+}
+
static const struct pwm_ops atmel_tcb_pwm_ops = {
.request = atmel_tcb_pwm_request,
.free = atmel_tcb_pwm_free,
- .config = atmel_tcb_pwm_config,
- .set_polarity = atmel_tcb_pwm_set_polarity,
- .enable = atmel_tcb_pwm_enable,
- .disable = atmel_tcb_pwm_disable,
+ .apply = atmel_tcb_pwm_apply,
.owner = THIS_MODULE,
};
@@ -454,7 +471,6 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags;
tcbpwm->chip.of_pwm_n_cells = 3;
- tcbpwm->chip.base = -1;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
tcbpwm->regmap = regmap;
@@ -491,14 +507,14 @@ static int atmel_tcb_pwm_remove(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
int err;
- clk_disable_unprepare(tcbpwm->slow_clk);
- clk_put(tcbpwm->slow_clk);
- clk_put(tcbpwm->clk);
-
err = pwmchip_remove(&tcbpwm->chip);
if (err < 0)
return err;
+ clk_disable_unprepare(tcbpwm->slow_clk);
+ clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->clk);
+
return 0;
}
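Note the ordering fix in the remove path: pwmchip_remove() must run before the clocks are released, because consumers can still invoke callbacks until the chip is unregistered. The same reordering recurs below for pwm-atmel, pwm-bcm-iproc, pwm-bcm2835, pwm-lpc18xx-sct and pwm-sti; a generic sketch with hypothetical names:

    static int foo_pwm_remove(struct platform_device *pdev)
    {
            struct foo_pwm *priv = platform_get_drvdata(pdev);

            /* unregister first: callbacks may still run and touch the HW */
            pwmchip_remove(&priv->chip);

            /* only now is it safe to tear down the resources they used */
            clk_disable_unprepare(priv->clk);

            return 0;
    }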
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 5813339b597b..29b5ad03f715 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -124,6 +124,7 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
}
static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
+ unsigned long clkrate,
const struct pwm_state *state,
unsigned long *cprd, u32 *pres)
{
@@ -132,7 +133,7 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
int shift;
/* Calculate the period cycles and prescale value */
- cycles *= clk_get_rate(atmel_pwm->clk);
+ cycles *= clkrate;
do_div(cycles, NSEC_PER_SEC);
/*
@@ -158,12 +159,14 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
}
static void atmel_pwm_calculate_cdty(const struct pwm_state *state,
- unsigned long cprd, unsigned long *cdty)
+ unsigned long clkrate, unsigned long cprd,
+ u32 pres, unsigned long *cdty)
{
unsigned long long cycles = state->duty_cycle;
- cycles *= cprd;
- do_div(cycles, state->period);
+ cycles *= clkrate;
+ do_div(cycles, NSEC_PER_SEC);
+ cycles >>= pres;
*cdty = cprd - cycles;
}
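The new cdty computation scales the duty cycle by the clock rate and prescaler directly instead of by the already-rounded cprd/period ratio, so rounding errors no longer compound. A worked example with assumed numbers:

    /*
     * Assume clkrate = 100 MHz, pres = 4, state->duty_cycle = 500000 ns:
     *
     *   cycles  = 500000 * 100000000 / NSEC_PER_SEC = 50000
     *   cycles >>= 4                ->  3125
     *   cdty    = cprd - 3125
     *
     * (The duty register holds cprd minus the duty cycles, which is why
     * the get_state() fix below reads back cprd - cdty.)
     */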
@@ -244,17 +247,23 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_get_state(pwm, &cstate);
if (state->enabled) {
+ unsigned long clkrate = clk_get_rate(atmel_pwm->clk);
+
if (cstate.enabled &&
cstate.polarity == state->polarity &&
cstate.period == state->period) {
+ u32 cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.period);
- atmel_pwm_calculate_cdty(state, cprd, &cdty);
+ pres = cmr & PWM_CMR_CPRE_MSK;
+
+ atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty);
atmel_pwm_update_cdty(chip, pwm, cdty);
return 0;
}
- ret = atmel_pwm_calculate_cprd_and_pres(chip, state, &cprd,
+ ret = atmel_pwm_calculate_cprd_and_pres(chip, clkrate, state, &cprd,
&pres);
if (ret) {
dev_err(chip->dev,
@@ -262,7 +271,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- atmel_pwm_calculate_cdty(state, cprd, &cdty);
+ atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty);
if (cstate.enabled) {
atmel_pwm_disable(chip, pwm, false);
@@ -319,7 +328,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty);
- tmp = (u64)cdty * NSEC_PER_SEC;
+ tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
tmp <<= pres;
state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
@@ -429,7 +438,6 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.ops = &atmel_pwm_ops;
atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
atmel_pwm->chip.of_pwm_n_cells = 3;
- atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
ret = pwmchip_add(&atmel_pwm->chip);
@@ -451,10 +459,12 @@ static int atmel_pwm_remove(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev);
+ pwmchip_remove(&atmel_pwm->chip);
+
clk_unprepare(atmel_pwm->clk);
mutex_destroy(&atmel_pwm->isr_lock);
- return pwmchip_remove(&atmel_pwm->chip);
+ return 0;
}
static struct platform_driver atmel_pwm_driver = {
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index f4853c4a2d75..edd2ce1760ab 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -209,7 +209,6 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
ip->chip.dev = &pdev->dev;
ip->chip.ops = &iproc_pwm_ops;
- ip->chip.base = -1;
ip->chip.npwm = 4;
ip->chip.of_xlate = of_pwm_xlate_with_flags;
ip->chip.of_pwm_n_cells = 3;
@@ -254,9 +253,11 @@ static int iproc_pwmc_remove(struct platform_device *pdev)
{
struct iproc_pwmc *ip = platform_get_drvdata(pdev);
+ pwmchip_remove(&ip->chip);
+
clk_disable_unprepare(ip->clk);
- return pwmchip_remove(&ip->chip);
+ return 0;
}
static const struct of_device_id bcm_iproc_pwmc_dt[] = {
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 578b3621c97e..800b9edf2e71 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -271,7 +271,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
- kp->chip.base = -1;
kp->chip.npwm = 6;
kp->chip.of_xlate = of_pwm_xlate_with_flags;
kp->chip.of_pwm_n_cells = 3;
@@ -301,7 +300,7 @@ static int kona_pwmc_probe(struct platform_device *pdev)
clk_disable_unprepare(kp->clk);
- ret = pwmchip_add_with_polarity(&kp->chip, PWM_POLARITY_INVERSED);
+ ret = pwmchip_add(&kp->chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
@@ -311,11 +310,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
static int kona_pwmc_remove(struct platform_device *pdev)
{
struct kona_pwmc *kp = platform_get_drvdata(pdev);
- unsigned int chan;
-
- for (chan = 0; chan < kp->chip.npwm; chan++)
- if (pwm_is_enabled(&kp->chip.pwms[chan]))
- clk_disable_unprepare(kp->clk);
return pwmchip_remove(&kp->chip);
}
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 6ff5f04b3e07..fc240d5b8121 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -64,8 +64,9 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
unsigned long rate = clk_get_rate(pc->clk);
- unsigned long long period;
- unsigned long scaler;
+ unsigned long long period_cycles;
+ u64 max_period;
+
u32 val;
if (!rate) {
@@ -73,18 +74,36 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
- scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
+ /*
+ * period_cycles must be a 32 bit value, so period * rate / NSEC_PER_SEC
+ * must be <= U32_MAX. As U32_MAX * NSEC_PER_SEC < U64_MAX the
+ * multiplication period * rate doesn't overflow.
+ * To calculate the maximal possible period that guarantees the
+ * above inequality:
+ *
+ * round(period * rate / NSEC_PER_SEC) <= U32_MAX
+ * <=> period * rate / NSEC_PER_SEC < U32_MAX + 0.5
+ * <=> period * rate < (U32_MAX + 0.5) * NSEC_PER_SEC
+ * <=> period < ((U32_MAX + 0.5) * NSEC_PER_SEC) / rate
+ * <=> period < (U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate
+ * <=> period <= ceil((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate) - 1
+ */
+ max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, rate) - 1;
+
+ if (state->period > max_period)
+ return -EINVAL;
+
/* set period */
- period = DIV_ROUND_CLOSEST_ULL(state->period, scaler);
+ period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * rate, NSEC_PER_SEC);
- /* dont accept a period that is too small or has been truncated */
- if ((period < PERIOD_MIN) || (period > U32_MAX))
+ /* don't accept a period that is too small */
+ if (period_cycles < PERIOD_MIN)
return -EINVAL;
- writel(period, pc->base + PERIOD(pwm->hwpwm));
+ writel(period_cycles, pc->base + PERIOD(pwm->hwpwm));
/* set duty cycle */
- val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, scaler);
+ val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * rate, NSEC_PER_SEC);
writel(val, pc->base + DUTY(pwm->hwpwm));
/* set polarity */
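A worked instance of the bound and the conversion, assuming rate = 10 MHz:

    /*
     * max_period = DIV_ROUND_UP(U32_MAX * 10^9 + 5 * 10^8, 10^7) - 1
     *            = 4294967295 * 100 + 50 - 1
     *            = 429496729549 ns   (~429.5 s)
     *
     * and for state->period = 1000000 ns (1 ms):
     *
     *   period_cycles = 1000000 * 10^7 / 10^9 = 10000   -> fits in u32
     */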
@@ -139,7 +158,6 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = 2;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
@@ -161,9 +179,11 @@ static int bcm2835_pwm_remove(struct platform_device *pdev)
{
struct bcm2835_pwm *pc = platform_get_drvdata(pdev);
+ pwmchip_remove(&pc->chip);
+
clk_disable_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static const struct of_device_id bcm2835_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index fe405289e582..acb6fbc3cc32 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -206,7 +206,6 @@ static int berlin_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &berlin_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = 4;
pwm->chip.of_xlate = of_pwm_xlate_with_flags;
pwm->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 8b66f9d2f589..8b1d1e7aa856 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -258,7 +258,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
p->chip.dev = &pdev->dev;
p->chip.ops = &brcmstb_pwm_ops;
- p->chip.base = -1;
p->chip.npwm = 2;
p->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index cb1af86873ee..f3d17a590305 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -128,7 +128,6 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &clps711x_pwm_ops;
priv->chip.dev = &pdev->dev;
- priv->chip.base = -1;
priv->chip.npwm = 2;
priv->chip.of_xlate = clps711x_pwm_xlate;
priv->chip.of_pwm_n_cells = 1;
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 1e2276808b7a..02522a9a3073 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -168,7 +168,6 @@ static int crystalcove_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &crc_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = 1;
/* get the PMIC regmap */
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index c1c337969e4e..9fffb566af5f 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -124,6 +124,9 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->period != EC_PWM_MAX_DUTY)
return -EINVAL;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
/*
* EC doesn't separate the concept of duty cycle and enabled, but
* kernel does. Translate.
@@ -253,7 +256,6 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
chip->ops = &cros_ec_pwm_ops;
chip->of_xlate = cros_ec_pwm_xlate;
chip->of_pwm_n_cells = 1;
- chip->base = -1;
ret = cros_ec_num_pwms(ec);
if (ret < 0) {
dev_err(dev, "Couldn't find PWMs: %d\n", ret);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index f6c98e0d57c2..7568300bb11e 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -233,7 +233,6 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
dwc->chip.dev = dev;
dwc->chip.ops = &dwc_pwm_ops;
dwc->chip.npwm = DWC_TIMERS_TOTAL;
- dwc->chip.base = -1;
ret = pwmchip_add(&dwc->chip);
if (ret)
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index c9fc6f223640..4ca70794ad96 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -185,7 +185,6 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
ep93xx_pwm->chip.dev = &pdev->dev;
ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
- ep93xx_pwm->chip.base = -1;
ep93xx_pwm->chip.npwm = 1;
ret = pwmchip_add(&ep93xx_pwm->chip);
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 2a6801226aba..0e1ae9469eda 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -453,7 +453,6 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.ops = &fsl_pwm_ops;
fpc->chip.of_xlate = of_pwm_xlate_with_flags;
fpc->chip.of_pwm_n_cells = 3;
- fpc->chip.base = -1;
fpc->chip.npwm = 8;
ret = pwmchip_add(&fpc->chip);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index a1900d0a872e..82d17fc75c21 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -205,7 +205,6 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.ops = &hibvt_pwm_ops;
pwm_chip->chip.dev = &pdev->dev;
- pwm_chip->chip.base = -1;
pwm_chip->chip.npwm = soc->num_pwms;
pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
pwm_chip->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 6faf5b5a5584..cc37054589cc 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -304,7 +304,6 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = IMG_PWM_NPWM;
ret = pwmchip_add(&pwm->chip);
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index aaf629bd8c35..97c9133b6876 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -363,7 +363,6 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
tpm->chip.dev = &pdev->dev;
tpm->chip.ops = &imx_tpm_pwm_ops;
- tpm->chip.base = -1;
tpm->chip.of_xlate = of_pwm_xlate_with_flags;
tpm->chip.of_pwm_n_cells = 3;
@@ -411,9 +410,7 @@ static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
ret = clk_prepare_enable(tpm->clk);
if (ret)
- dev_err(dev,
- "failed to prepare or enable clock: %d\n",
- ret);
+ dev_err(dev, "failed to prepare or enable clock: %d\n", ret);
return ret;
}
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index 727e0d3e249e..c957b365448e 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -155,7 +155,6 @@ static int pwm_imx1_probe(struct platform_device *pdev)
imx->chip.ops = &pwm_imx1_ops;
imx->chip.dev = &pdev->dev;
- imx->chip.base = -1;
imx->chip.npwm = 1;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 18055326a2f3..ba695115c160 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -327,7 +327,6 @@ static int pwm_imx27_probe(struct platform_device *pdev)
imx->chip.ops = &pwm_imx27_ops;
imx->chip.dev = &pdev->dev;
- imx->chip.base = -1;
imx->chip.npwm = 1;
imx->chip.of_xlate = of_pwm_xlate_with_flags;
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index e9e54dda07aa..015f5eba09a1 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -207,7 +207,6 @@ static int lgm_pwm_probe(struct platform_device *pdev)
pc->chip.dev = dev;
pc->chip.ops = &lgm_pwm_ops;
pc->chip.npwm = 1;
- pc->chip.base = -1;
lgm_pwm_init(pc);
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 957b972c458b..6c6e26d18329 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -206,7 +206,6 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
iqs620_pwm->chip.dev = &pdev->dev;
iqs620_pwm->chip.ops = &iqs620_pwm_ops;
- iqs620_pwm->chip.base = -1;
iqs620_pwm->chip.npwm = 1;
mutex_init(&iqs620_pwm->lock);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 00c642fa2eed..5b6bdcdcecf5 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -244,7 +244,6 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.dev = dev;
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = info->num_pwms;
- jz4740->chip.base = -1;
jz4740->chip.of_xlate = of_pwm_xlate_with_flags;
jz4740->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index cdfdef66ff8e..521a825c8ba0 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -203,7 +203,6 @@ static int keembay_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->chip.base = -1;
priv->chip.dev = dev;
priv->chip.ops = &keembay_pwm_ops;
priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index bf3f14fb5f24..7551253ada32 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -275,7 +275,6 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
- lp3943_pwm->chip.base = -1;
platform_set_drvdata(pdev, lp3943_pwm);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 7ef40243eb6c..b643ac61a2e7 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -370,7 +370,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->chip.dev = &pdev->dev;
lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
- lpc18xx_pwm->chip.base = -1;
lpc18xx_pwm->chip.npwm = 16;
lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
lpc18xx_pwm->chip.of_pwm_n_cells = 3;
@@ -442,13 +441,15 @@ static int lpc18xx_pwm_remove(struct platform_device *pdev)
struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
u32 val;
+ pwmchip_remove(&lpc18xx_pwm->chip);
+
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
val | LPC18XX_PWM_CTRL_HALT);
clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
- return pwmchip_remove(&lpc18xx_pwm->chip);
+ return 0;
}
static struct platform_driver lpc18xx_pwm_driver = {
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 6b4090436c06..2834a0f001d3 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -116,7 +116,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 1;
- lpc32xx->chip.base = -1;
ret = pwmchip_add(&lpc32xx->chip);
if (ret < 0) {
@@ -137,10 +136,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
static int lpc32xx_pwm_remove(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < lpc32xx->chip.npwm; i++)
- pwm_disable(&lpc32xx->chip.pwms[i]);
return pwmchip_remove(&lpc32xx->chip);
}
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 939de93c157b..58b4031524af 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -234,7 +234,6 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
- lpwm->chip.base = -1;
lpwm->chip.npwm = info->npwm;
ret = pwmchip_add(&lpwm->chip);
@@ -255,12 +254,6 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe);
int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
{
- int i;
-
- for (i = 0; i < lpwm->info->npwm; i++) {
- if (pwm_is_enabled(&lpwm->chip.pwms[i]))
- pm_runtime_put(lpwm->chip.dev);
- }
return pwmchip_remove(&lpwm->chip);
}
EXPORT_SYMBOL_GPL(pwm_lpss_remove);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index fcfc3b147e5f..b4a31060bcd7 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -107,12 +107,6 @@ static void pwm_mediatek_clk_disable(struct pwm_chip *chip,
clk_disable_unprepare(pc->clk_top);
}
-static inline u32 pwm_mediatek_readl(struct pwm_mediatek_chip *chip,
- unsigned int num, unsigned int offset)
-{
- return readl(chip->regs + pwm_mediatek_reg_offset[num] + offset);
-}
-
static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
@@ -263,7 +257,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &pwm_mediatek_ops;
- pc->chip.base = -1;
pc->chip.npwm = pc->soc->num_pwms;
ret = pwmchip_add(&pc->chip);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index a3ce9789412a..9eb060613cb4 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -550,7 +550,6 @@ static int meson_pwm_probe(struct platform_device *pdev)
spin_lock_init(&meson->lock);
meson->chip.dev = &pdev->dev;
meson->chip.ops = &meson_pwm_ops;
- meson->chip.base = -1;
meson->chip.npwm = MESON_NUM_PWMS;
meson->chip.of_xlate = of_pwm_xlate_with_flags;
meson->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 87c6b4bc5d43..9b3ba401a3db 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -202,7 +202,6 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
- mdp->chip.base = -1;
mdp->chip.npwm = 1;
ret = pwmchip_add(&mdp->chip);
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 7ce616923c52..0266e84e982c 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -140,7 +140,6 @@ static int mxs_pwm_probe(struct platform_device *pdev)
mxs->chip.ops = &mxs_pwm_ops;
mxs->chip.of_xlate = of_pwm_xlate_with_flags;
mxs->chip.of_pwm_n_cells = 3;
- mxs->chip.base = -1;
ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 358db4ff9d4f..612b3c859295 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -403,7 +403,6 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
omap->chip.dev = &pdev->dev;
omap->chip.ops = &pwm_omap_dmtimer_ops;
- omap->chip.base = -1;
omap->chip.npwm = 1;
omap->chip.of_xlate = of_pwm_xlate_with_flags;
omap->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 4a55dc18656c..7c9f174de64e 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -51,7 +51,6 @@
#define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */
#define PCA9685_COUNTER_RANGE 4096
-#define PCA9685_DEFAULT_PERIOD 5000000 /* Default period_ns = 1/200 Hz */
#define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator with 25 MHz */
#define PCA9685_NUMREGS 0xFF
@@ -71,10 +70,14 @@
#define LED_N_OFF_H(N) (PCA9685_LEDX_OFF_H + (4 * (N)))
#define LED_N_OFF_L(N) (PCA9685_LEDX_OFF_L + (4 * (N)))
+#define REG_ON_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_H : LED_N_ON_H((C)))
+#define REG_ON_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_L : LED_N_ON_L((C)))
+#define REG_OFF_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_H : LED_N_OFF_H((C)))
+#define REG_OFF_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L((C)))
+
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
- int period_ns;
#if IS_ENABLED(CONFIG_GPIOLIB)
struct mutex lock;
struct gpio_chip gpio;
@@ -87,6 +90,53 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
return container_of(chip, struct pca9685, chip);
}
+/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
+static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty)
+{
+ if (duty == 0) {
+ /* Set the full OFF bit, which has the highest precedence */
+ regmap_write(pca->regmap, REG_OFF_H(channel), LED_FULL);
+ } else if (duty >= PCA9685_COUNTER_RANGE) {
+ /* Set the full ON bit and clear the full OFF bit */
+ regmap_write(pca->regmap, REG_ON_H(channel), LED_FULL);
+ regmap_write(pca->regmap, REG_OFF_H(channel), 0);
+ } else {
+ /* Set OFF time (clears the full OFF bit) */
+ regmap_write(pca->regmap, REG_OFF_L(channel), duty & 0xff);
+ regmap_write(pca->regmap, REG_OFF_H(channel), (duty >> 8) & 0xf);
+ /* Clear the full ON bit */
+ regmap_write(pca->regmap, REG_ON_H(channel), 0);
+ }
+}
+
+static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
+{
+ unsigned int off_h = 0, val = 0;
+
+ if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
+ /* HW does not support reading state of "all LEDs" channel */
+ return 0;
+ }
+
+ regmap_read(pca->regmap, LED_N_OFF_H(channel), &off_h);
+ if (off_h & LED_FULL) {
+ /* Full OFF bit is set */
+ return 0;
+ }
+
+ regmap_read(pca->regmap, LED_N_ON_H(channel), &val);
+ if (val & LED_FULL) {
+ /* Full ON bit is set */
+ return PCA9685_COUNTER_RANGE;
+ }
+
+ if (regmap_read(pca->regmap, LED_N_OFF_L(channel), &val)) {
+ /* Reset val to 0 in case reading LED_N_OFF_L failed */
+ val = 0;
+ }
+ return ((off_h & 0xf) << 8) | (val & 0xff);
+}
+
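Hypothetical usage of the two helpers above, for illustration only:

    /* channel 5 of an assumed pca9685 instance */
    pca9685_pwm_set_duty(pca, 5, 2048);     /* 2048/4096 = 50% duty */
    pca9685_pwm_set_duty(pca, 5, 0);        /* full OFF: output held low */
    pca9685_pwm_set_duty(pca, 5, PCA9685_COUNTER_RANGE);   /* full ON */
    duty = pca9685_pwm_get_duty(pca, 5);    /* reads back 0..4096 */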
#if IS_ENABLED(CONFIG_GPIOLIB)
static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
{
@@ -138,34 +188,23 @@ static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm = &pca->chip.pwms[offset];
- unsigned int value;
-
- regmap_read(pca->regmap, LED_N_ON_H(pwm->hwpwm), &value);
- return value & LED_FULL;
+ return pca9685_pwm_get_duty(pca, offset) != 0;
}
static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
int value)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm = &pca->chip.pwms[offset];
- unsigned int on = value ? LED_FULL : 0;
- /* Clear both OFF registers */
- regmap_write(pca->regmap, LED_N_OFF_L(pwm->hwpwm), 0);
- regmap_write(pca->regmap, LED_N_OFF_H(pwm->hwpwm), 0);
-
- /* Set the full ON bit */
- regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on);
+ pca9685_pwm_set_duty(pca, offset, value ? PCA9685_COUNTER_RANGE : 0);
}
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- pca9685_pwm_gpio_set(gpio, offset, 0);
+ pca9685_pwm_set_duty(pca, offset, 0);
pm_runtime_put(pca->chip.dev);
pca9685_pwm_clear_inuse(pca, offset);
}
@@ -246,165 +285,85 @@ static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable)
}
}
-static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
- unsigned long long duty;
- unsigned int reg;
- int prescale;
-
- if (period_ns != pca->period_ns) {
- prescale = DIV_ROUND_CLOSEST(PCA9685_OSC_CLOCK_MHZ * period_ns,
- PCA9685_COUNTER_RANGE * 1000) - 1;
-
- if (prescale >= PCA9685_PRESCALE_MIN &&
- prescale <= PCA9685_PRESCALE_MAX) {
- /*
- * Putting the chip briefly into SLEEP mode
- * at this point won't interfere with the
- * pm_runtime framework, because the pm_runtime
- * state is guaranteed active here.
- */
- /* Put chip into sleep mode */
- pca9685_set_sleep_mode(pca, true);
-
- /* Change the chip-wide output frequency */
- regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
-
- /* Wake the chip up */
- pca9685_set_sleep_mode(pca, false);
-
- pca->period_ns = period_ns;
- } else {
- dev_err(chip->dev,
- "prescaler not set: period out of bounds!\n");
- return -EINVAL;
- }
- }
+ unsigned long long duty, prescale;
+ unsigned int val = 0;
- if (duty_ns < 1) {
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
- regmap_write(pca->regmap, reg, LED_FULL);
-
- return 0;
+ prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period,
+ PCA9685_COUNTER_RANGE * 1000) - 1;
+ if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) {
+ dev_err(chip->dev, "pwm not changed: period out of bounds!\n");
+ return -EINVAL;
}
- if (duty_ns == period_ns) {
- /* Clear both OFF registers */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_L;
- else
- reg = LED_N_OFF_L(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0x0);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0x0);
-
- /* Set the full ON bit */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_H;
- else
- reg = LED_N_ON_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, LED_FULL);
-
+ if (!state->enabled) {
+ pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
return 0;
}
- duty = PCA9685_COUNTER_RANGE * (unsigned long long)duty_ns;
- duty = DIV_ROUND_UP_ULL(duty, period_ns);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_L;
- else
- reg = LED_N_OFF_L(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, (int)duty & 0xff);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, ((int)duty >> 8) & 0xf);
+ regmap_read(pca->regmap, PCA9685_PRESCALE, &val);
+ if (prescale != val) {
+ /*
+ * Putting the chip briefly into SLEEP mode
+ * at this point won't interfere with the
+ * pm_runtime framework, because the pm_runtime
+ * state is guaranteed active here.
+ */
+ /* Put chip into sleep mode */
+ pca9685_set_sleep_mode(pca, true);
- /* Clear the full ON bit, otherwise the set OFF time has no effect */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_H;
- else
- reg = LED_N_ON_H(pwm->hwpwm);
+ /* Change the chip-wide output frequency */
+ regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
- regmap_write(pca->regmap, reg, 0);
+ /* Wake the chip up */
+ pca9685_set_sleep_mode(pca, false);
+ }
+ duty = PCA9685_COUNTER_RANGE * state->duty_cycle;
+ duty = DIV_ROUND_UP_ULL(duty, state->period);
+ pca9685_pwm_set_duty(pca, pwm->hwpwm, duty);
return 0;
}
-static int pca9685_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
- unsigned int reg;
-
- /*
- * The PWM subsystem does not support a pre-delay.
- * So, set the ON-timeout to 0
- */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_L;
- else
- reg = LED_N_ON_L(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0);
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_ON_H;
- else
- reg = LED_N_ON_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, 0);
+ unsigned long long duty;
+ unsigned int val = 0;
+ /* Calculate (chip-wide) period from prescale value */
+ regmap_read(pca->regmap, PCA9685_PRESCALE, &val);
/*
- * Clear the full-off bit.
- * It has precedence over the others and must be off.
+ * PCA9685_OSC_CLOCK_MHZ is 25, i.e. an integer divisor of 1000.
+ * The following calculation is therefore only a multiplication
+ * and we are not losing precision.
*/
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_update_bits(pca->regmap, reg, LED_FULL, 0x0);
+ state->period = (PCA9685_COUNTER_RANGE * 1000 / PCA9685_OSC_CLOCK_MHZ) *
+ (val + 1);
- return 0;
-}
+ /* The (per-channel) polarity is fixed */
+ state->polarity = PWM_POLARITY_NORMAL;
-static void pca9685_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct pca9685 *pca = to_pca(chip);
- unsigned int reg;
-
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_H;
- else
- reg = LED_N_OFF_H(pwm->hwpwm);
-
- regmap_write(pca->regmap, reg, LED_FULL);
-
- /* Clear the LED_OFF counter. */
- if (pwm->hwpwm >= PCA9685_MAXCHAN)
- reg = PCA9685_ALL_LED_OFF_L;
- else
- reg = LED_N_OFF_L(pwm->hwpwm);
+ if (pwm->hwpwm >= PCA9685_MAXCHAN) {
+ /*
+ * The "all LEDs" channel does not support HW readout
+ * Return 0 and disabled for backwards compatibility
+ */
+ state->duty_cycle = 0;
+ state->enabled = false;
+ return;
+ }
- regmap_write(pca->regmap, reg, 0x0);
+ state->enabled = true;
+ duty = pca9685_pwm_get_duty(pca, pwm->hwpwm);
+ state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
}
static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -422,15 +381,14 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
- pca9685_pwm_disable(chip, pwm);
+ pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
pm_runtime_put(chip->dev);
pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
- .enable = pca9685_pwm_enable,
- .disable = pca9685_pwm_disable,
- .config = pca9685_pwm_config,
+ .apply = pca9685_pwm_apply,
+ .get_state = pca9685_pwm_get_state,
.request = pca9685_pwm_request,
.free = pca9685_pwm_free,
.owner = THIS_MODULE,
@@ -461,7 +419,6 @@ static int pca9685_pwm_probe(struct i2c_client *client,
ret);
return ret;
}
- pca->period_ns = PCA9685_DEFAULT_PERIOD;
i2c_set_clientdata(client, pca);
@@ -484,16 +441,15 @@ static int pca9685_pwm_probe(struct i2c_client *client,
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
regmap_write(pca->regmap, PCA9685_MODE1, reg);
- /* Clear all "full off" bits */
- regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, 0);
- regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, 0);
+ /* Reset OFF registers to POR default */
+ regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, LED_FULL);
+ regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, LED_FULL);
pca->chip.ops = &pca9685_pwm_ops;
/* Add an extra channel for ALL_LED */
pca->chip.npwm = PCA9685_MAXCHAN + 1;
pca->chip.dev = &client->dev;
- pca->chip.base = -1;
ret = pwmchip_add(&pca->chip);
if (ret < 0)
@@ -505,14 +461,20 @@ static int pca9685_pwm_probe(struct i2c_client *client,
return ret;
}
- /* The chip comes out of power-up in the active state */
- pm_runtime_set_active(&client->dev);
- /*
- * Enable will put the chip into suspend, which is what we
- * want as all outputs are disabled at this point
- */
pm_runtime_enable(&client->dev);
+ if (pm_runtime_enabled(&client->dev)) {
+ /*
+ * Although the chip comes out of power-up in the sleep state,
+ * we force it to sleep in case it was woken up before
+ */
+ pca9685_set_sleep_mode(pca, true);
+ pm_runtime_set_suspended(&client->dev);
+ } else {
+ /* Wake the chip up if runtime PM is disabled */
+ pca9685_set_sleep_mode(pca, false);
+ }
+
return 0;
}
@@ -524,7 +486,14 @@ static int pca9685_pwm_remove(struct i2c_client *client)
ret = pwmchip_remove(&pca->chip);
if (ret)
return ret;
+
+ if (!pm_runtime_enabled(&client->dev)) {
+ /* Put chip in sleep state if runtime PM is disabled */
+ pca9685_set_sleep_mode(pca, true);
+ }
+
pm_runtime_disable(&client->dev);
+
return 0;
}
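To see the apply/get_state pair above round-trip, take an assumed request of period = 5000000 ns (200 Hz):

    /*
     *   prescale = round(25 * 5000000 / (4096 * 1000)) - 1
     *            = round(30.52) - 1 = 30
     *
     * and .get_state() reconstructs from prescale = 30:
     *
     *   period = (4096 * 1000 / 25) * (30 + 1) = 163840 * 31
     *          = 5079040 ns   (~196.9 Hz, the closest the chip can do)
     */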
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index d06cf60e6575..cfb683827d32 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -184,7 +184,6 @@ static int pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &pxa_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
if (IS_ENABLED(CONFIG_OF)) {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 002ab79a7ec2..9daca0c772c7 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -224,7 +224,6 @@ static int rcar_pwm_probe(struct platform_device *pdev)
rcar_pwm->chip.dev = &pdev->dev;
rcar_pwm->chip.ops = &rcar_pwm_ops;
- rcar_pwm->chip.base = -1;
rcar_pwm->chip.npwm = 1;
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index d02b24b77cdf..e2959fae0969 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -410,7 +410,6 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.ops = &tpu_pwm_ops;
tpu->chip.of_xlate = of_pwm_xlate_with_flags;
tpu->chip.of_pwm_n_cells = 3;
- tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 6ad7d0a50aed..301785fa293e 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -352,7 +352,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->data = id->data;
pc->chip.dev = &pdev->dev;
pc->chip.ops = &rockchip_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = 1;
if (pc->data->supports_polarity) {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 645d0066ff0a..515489fa4f6d 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -519,7 +519,6 @@ static int pwm_samsung_probe(struct platform_device *pdev)
chip->chip.dev = &pdev->dev;
chip->chip.ops = &pwm_samsung_ops;
- chip->chip.base = -1;
chip->chip.npwm = SAMSUNG_PWM_NUM;
chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 2a7cd2deaeea..688737f091ac 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -244,7 +244,6 @@ static int pwm_sifive_probe(struct platform_device *pdev)
chip->ops = &pwm_sifive_ops;
chip->of_xlate = of_pwm_xlate_with_flags;
chip->of_pwm_n_cells = 3;
- chip->base = -1;
chip->npwm = 4;
ddata->regs = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 0b01ec25e2f0..7a69c1a0c060 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -229,7 +229,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip = &priv->pwm_chip;
chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
- chip->base = -1;
chip->npwm = 1;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index f63b54aae1b4..1a1cedfd11ce 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -193,7 +193,6 @@ static int spear_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &spear_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = NUM_PWM;
ret = clk_prepare(pc->clk);
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 5123d948efd6..98c479dfae31 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -164,6 +164,9 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *cstate = &pwm->state;
int ret;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
if (state->enabled) {
if (!cstate->enabled) {
/*
@@ -268,7 +271,6 @@ static int sprd_pwm_probe(struct platform_device *pdev)
spc->chip.dev = &pdev->dev;
spc->chip.ops = &sprd_pwm_ops;
- spc->chip.base = -1;
spc->chip.npwm = spc->num_pwms;
ret = pwmchip_add(&spc->chip);
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 99c70e07858d..f491d56254d7 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -619,7 +619,6 @@ static int sti_pwm_probe(struct platform_device *pdev)
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
- pc->chip.base = -1;
pc->chip.npwm = pc->cdata->pwm_num_devs;
ret = pwmchip_add(&pc->chip);
@@ -650,15 +649,13 @@ static int sti_pwm_probe(struct platform_device *pdev)
static int sti_pwm_remove(struct platform_device *pdev)
{
struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
- unsigned int i;
- for (i = 0; i < pc->cdata->pwm_num_devs; i++)
- pwm_disable(&pc->chip.pwms[i]);
+ pwmchip_remove(&pc->chip);
clk_unprepare(pc->pwm_clk);
clk_unprepare(pc->cpt_clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static const struct of_device_id sti_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 134c14621ee0..af08f564ef1d 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -205,7 +205,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
- priv->chip.base = -1;
priv->chip.dev = &pdev->dev;
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index d3be944f2ae9..c46fb90036ab 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -633,7 +633,6 @@ static int stm32_pwm_probe(struct platform_device *pdev)
stm32_pwm_detect_complementary(priv);
- priv->chip.base = -1;
priv->chip.dev = dev;
priv->chip.ops = &stm32pwm_ops;
priv->chip.npwm = stm32_pwm_detect_channels(priv);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index be5f6d7359d4..9dc983a3cbf1 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -278,7 +278,6 @@ static int __init stmpe_pwm_probe(struct platform_device *pdev)
pwm->stmpe = stmpe;
pwm->chip.dev = &pdev->dev;
- pwm->chip.base = -1;
if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
pwm->chip.ops = &stmpe_24xx_pwm_ops;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index ce5c4fc8da6f..e01becd102c0 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -459,7 +459,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = pwm->data->npwm;
pwm->chip.of_xlate = of_pwm_xlate_with_flags;
pwm->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 55bc63d5a0ae..c529a170bcdd 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -285,7 +285,6 @@ static int tegra_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &tegra_pwm_ops;
- pwm->chip.base = -1;
pwm->chip.npwm = pwm->soc->num_channels;
ret = pwmchip_add(&pwm->chip);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 2a8949014bb1..b9a17ab0c202 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -226,7 +226,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.ops = &ecap_pwm_ops;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
- pc->chip.base = -1;
pc->chip.npwm = 1;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index a7fb224d6535..90095a19bf2d 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -449,7 +449,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.ops = &ehrpwm_pwm_ops;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
- pc->chip.base = -1;
pc->chip.npwm = NUM_PWM_CHANNEL;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 630b9a578820..6c8df5f4e87d 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -291,7 +291,6 @@ static int twl_pwmled_probe(struct platform_device *pdev)
}
twl->chip.dev = &pdev->dev;
- twl->chip.base = -1;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index aee67974f353..e83a826bf621 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -310,7 +310,6 @@ static int twl_pwm_probe(struct platform_device *pdev)
twl->chip.ops = &twl6030_pwm_ops;
twl->chip.dev = &pdev->dev;
- twl->chip.base = -1;
twl->chip.npwm = 2;
mutex_init(&twl->mutex);
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
new file mode 100644
index 000000000000..46d903786366
--- /dev/null
+++ b/drivers/pwm/pwm-visconti.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti pulse-width-modulation controller driver
+ *
+ * Copyright (c) 2020 - 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2020 - 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Authors: Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ *
+ * Limitations:
+ * - The fixed input clock is running at 1 MHz and is divided by either 1,
+ * 2, 4 or 8.
+ * - When the settings of the PWM are modified, the new values are shadowed
+ * in hardware until the PIPGM_PCSR register is written and the currently
+ * running period is completed. This way the hardware switches atomically
+ * from the old setting to the new.
+ * - Disabling the hardware completes the currently running period and keeps
+ * the output at low level at all times.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define PIPGM_PCSR(ch) (0x400 + 4 * (ch))
+#define PIPGM_PDUT(ch) (0x420 + 4 * (ch))
+#define PIPGM_PWMC(ch) (0x440 + 4 * (ch))
+
+#define PIPGM_PWMC_PWMACT BIT(5)
+#define PIPGM_PWMC_CLK_MASK GENMASK(1, 0)
+#define PIPGM_PWMC_POLARITY_MASK GENMASK(5, 5)
+
+struct visconti_pwm_chip {
+ struct pwm_chip chip;
+ void __iomem *base;
+};
+
+static inline struct visconti_pwm_chip *visconti_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct visconti_pwm_chip, chip);
+}
+
+static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
+ u32 period, duty_cycle, pwmc0;
+
+ if (!state->enabled) {
+ writel(0, priv->base + PIPGM_PCSR(pwm->hwpwm));
+ return 0;
+ }
+
+ /*
+ * The biggest period the hardware can provide is
+ * (0xffff << 3) * 1000 ns
+ * This value fits easily in a u32, so simplify the maths by
+ * capping the values to 32 bit integers.
+ */
+ if (state->period > (0xffff << 3) * 1000)
+ period = (0xffff << 3) * 1000;
+ else
+ period = state->period;
+
+ if (state->duty_cycle > period)
+ duty_cycle = period;
+ else
+ duty_cycle = state->duty_cycle;
+
+ /*
+ * The input clock runs fixed at 1 MHz, so we have only
+ * microsecond resolution and so can divide by
+ * NSEC_PER_SEC / CLKFREQ = 1000 without losing precision.
+ */
+ period /= 1000;
+ duty_cycle /= 1000;
+
+ if (!period)
+ return -ERANGE;
+
+ /*
+ * PWMC controls a divider that divides the input clk by a
+ * power of two between 1 and 8. As a smaller divider yields
+ * higher precision, pick the smallest possible one.
+ */
+ if (period > 0xffff) {
+ pwmc0 = ilog2(period >> 16);
+ if (WARN_ON(pwmc0 > 3))
+ return -EINVAL;
+ } else {
+ pwmc0 = 0;
+ }
+
+ period >>= pwmc0;
+ duty_cycle >>= pwmc0;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ pwmc0 |= PIPGM_PWMC_PWMACT;
+ writel(pwmc0, priv->base + PIPGM_PWMC(pwm->hwpwm));
+ writel(duty_cycle, priv->base + PIPGM_PDUT(pwm->hwpwm));
+ writel(period, priv->base + PIPGM_PCSR(pwm->hwpwm));
+
+ return 0;
+}
+
+static void visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
+ u32 period, duty, pwmc0, pwmc0_clk;
+
+ period = readl(priv->base + PIPGM_PCSR(pwm->hwpwm));
+ duty = readl(priv->base + PIPGM_PDUT(pwm->hwpwm));
+ pwmc0 = readl(priv->base + PIPGM_PWMC(pwm->hwpwm));
+ pwmc0_clk = pwmc0 & PIPGM_PWMC_CLK_MASK;
+
+ state->period = (period << pwmc0_clk) * NSEC_PER_USEC;
+ state->duty_cycle = (duty << pwmc0_clk) * NSEC_PER_USEC;
+ if (pwmc0 & PIPGM_PWMC_POLARITY_MASK)
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ state->enabled = true;
+}
+
+static const struct pwm_ops visconti_pwm_ops = {
+ .apply = visconti_pwm_apply,
+ .get_state = visconti_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int visconti_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pwm_chip *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->chip.dev = dev;
+ priv->chip.ops = &visconti_pwm_ops;
+ priv->chip.npwm = 4;
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n");
+
+ return 0;
+}
+
+static int visconti_pwm_remove(struct platform_device *pdev)
+{
+ struct visconti_pwm_chip *priv = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&priv->chip);
+
+ return 0;
+}
+
+static const struct of_device_id visconti_pwm_of_match[] = {
+ { .compatible = "toshiba,visconti-pwm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, visconti_pwm_of_match);
+
+static struct platform_driver visconti_pwm_driver = {
+ .driver = {
+ .name = "pwm-visconti",
+ .of_match_table = visconti_pwm_of_match,
+ },
+ .probe = visconti_pwm_probe,
+ .remove = visconti_pwm_remove,
+};
+module_platform_driver(visconti_pwm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>");
+MODULE_ALIAS("platform:pwm-visconti");
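A worked pass through visconti_pwm_apply() for an assumed 50 Hz servo signal (period = 20000000 ns, duty_cycle = 1500000 ns):

    /*
     *   period     = 20000000 / 1000 = 20000   (1 MHz clock, 1 us ticks)
     *   duty_cycle =  1500000 / 1000 =  1500
     *
     * 20000 <= 0xffff, so pwmc0 = 0 (divider 1) and the registers get
     * PIPGM_PWMC = 0, PIPGM_PDUT = 1500, PIPGM_PCSR = 20000.
     */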
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 6e36851a22bb..52fe5d19473a 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -209,7 +209,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
chip->chip.ops = &vt8500_pwm_ops;
chip->chip.of_xlate = of_pwm_xlate_with_flags;
chip->chip.of_pwm_n_cells = 3;
- chip->chip.base = -1;
chip->chip.npwm = VT8500_NR_PWMS;
chip->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 15d1574d129b..e68fcedc999c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -24,11 +24,12 @@ config REMOTEPROC_CDEV
It's safe to say N if you don't want to use this interface.
config IMX_REMOTEPROC
- tristate "IMX6/7 remoteproc support"
+ tristate "i.MX remoteproc support"
depends on ARCH_MXC
+ select MAILBOX
help
- Say y here to support iMX's remote processors (Cortex M4
- on iMX7D) via the remote processor framework.
+ Say y here to support i.MX's remote processors via the remote
+ processor framework.
It's safe to say N here.
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 8957ed271d20..d6338872c6db 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -7,13 +7,18 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <linux/workqueue.h>
+
+#include "remoteproc_internal.h"
#define IMX7D_SRC_SCR 0x0C
#define IMX7D_ENABLE_M4 BIT(3)
@@ -43,7 +48,7 @@
| IMX6SX_SW_M4C_NON_SCLR_RST \
| IMX6SX_SW_M4C_RST)
-#define IMX7D_RPROC_MEM_MAX 8
+#define IMX_RPROC_MEM_MAX 32
/**
* struct imx_rproc_mem - slim internal memory structure
@@ -83,8 +88,42 @@ struct imx_rproc {
struct regmap *regmap;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
- struct imx_rproc_mem mem[IMX7D_RPROC_MEM_MAX];
+ struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
struct clk *clk;
+ struct mbox_client cl;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ struct work_struct rproc_work;
+ struct workqueue_struct *workqueue;
+ void __iomem *rsc_table;
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCML - alias */
+ { 0x00000000, 0x007e0000, 0x00020000, 0 },
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00008000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
+ /* TCML */
+ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN },
+ /* TCMU */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN },
+ /* OCRAM_S */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
};
static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
@@ -137,6 +176,15 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx8mq,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
+};
+
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
@@ -208,7 +256,7 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
return -ENOENT;
}
-static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct imx_rproc *priv = rproc->priv;
void *va = NULL;
@@ -225,7 +273,7 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
if (imx_rproc_da_to_sys(priv, da, len, &sys))
return NULL;
- for (i = 0; i < IMX7D_RPROC_MEM_MAX; i++) {
+ for (i = 0; i < IMX_RPROC_MEM_MAX; i++) {
if (sys >= priv->mem[i].sys_addr && sys + len <
priv->mem[i].sys_addr + priv->mem[i].size) {
unsigned int offset = sys - priv->mem[i].sys_addr;
@@ -241,10 +289,143 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
return va;
}
+static int imx_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+ dev_dbg(dev, "map memory: %p+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (IS_ERR_OR_NULL(va)) {
+ dev_err(dev, "Unable to map memory region: %p+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int imx_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int imx_rproc_prepare(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device_node *np = priv->dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ /*
+ * Ignore the first memory region, which will be used as the vdev buffer.
+ * No extra handling is needed; rproc_add_virtio_dev will take care of it.
+ */
+ if (!strcmp(it.node->name, "vdev0buffer"))
+ continue;
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(priv->dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ /* No need to translate pa to da, i.MX uses the same mapping */
+ da = rmem->base;
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+ imx_rproc_mem_alloc, imx_rproc_mem_release,
+ it.node->name);
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da, rmem->size);
+ else
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static void imx_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct imx_rproc *priv = rproc->priv;
+ int err;
+ u32 mmsg;
+
+ if (!priv->tx_ch) {
+ dev_err(priv->dev, "No initialized mbox tx channel\n");
+ return;
+ }
+
+ /*
+ * Send the index of the triggered virtqueue as the MU payload,
+ * so the remote processor knows which virtqueue has work.
+ */
+ mmsg = vqid << 16;
+
+ err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (err < 0)
+ dev_err(priv->dev, "%s: failed (%d, err:%d)\n",
+ __func__, vqid, err);
+}
+
+static int imx_rproc_attach(struct rproc *rproc)
+{
+ return 0;
+}
+
+static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ /* The resource table has already been mapped in imx_rproc_addr_init */
+ if (!priv->rsc_table)
+ return NULL;
+
+ *table_sz = SZ_1K;
+ return (struct resource_table *)priv->rsc_table;
+}
+
static const struct rproc_ops imx_rproc_ops = {
+ .prepare = imx_rproc_prepare,
+ .attach = imx_rproc_attach,
.start = imx_rproc_start,
.stop = imx_rproc_stop,
+ .kick = imx_rproc_kick,
.da_to_va = imx_rproc_da_to_va,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
};
static int imx_rproc_addr_init(struct imx_rproc *priv,
@@ -262,13 +443,13 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
if (!(att->flags & ATT_OWN))
continue;
- if (b >= IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
att->sa, att->size);
if (!priv->mem[b].cpu_addr) {
- dev_err(dev, "devm_ioremap_resource failed\n");
+ dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa);
return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
@@ -287,29 +468,115 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
struct resource res;
node = of_parse_phandle(np, "memory-region", a);
+ /* Do not map the vdev region */
+ if (!strcmp(node->name, "vdev")) {
+ of_node_put(node);
+ continue;
+ }
err = of_address_to_resource(node, 0, &res);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- if (b >= IMX7D_RPROC_MEM_MAX)
+ of_node_put(node);
+
+ if (b >= IMX_RPROC_MEM_MAX)
break;
- priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(priv->mem[b].cpu_addr)) {
- dev_err(dev, "devm_ioremap_resource failed\n");
- err = PTR_ERR(priv->mem[b].cpu_addr);
- return err;
+ /* Do not use the resource-based variant, because the region might be shared */
+ priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
+ if (!priv->mem[b].cpu_addr) {
+ dev_err(dev, "failed to remap %pr\n", &res);
+ return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
+ if (!strcmp(node->name, "rsc_table"))
+ priv->rsc_table = priv->mem[b].cpu_addr;
b++;
}
return 0;
}
+static void imx_rproc_vq_work(struct work_struct *work)
+{
+ struct imx_rproc *priv = container_of(work, struct imx_rproc,
+ rproc_work);
+
+ rproc_vq_interrupt(priv->rproc, 0);
+ rproc_vq_interrupt(priv->rproc, 1);
+}
+
+static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_rproc *priv = rproc->priv;
+
+ queue_work(priv->workqueue, &priv->rproc_work);
+}
+
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device *dev = priv->dev;
+ struct mbox_client *cl;
+ int ret;
+
+ if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ return 0;
+
+ cl = &priv->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+ cl->rx_callback = imx_rproc_rx_callback;
+
+ priv->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(priv->tx_ch)) {
+ ret = PTR_ERR(priv->tx_ch);
+ return dev_err_probe(cl->dev, ret,
+ "failed to request tx mailbox channel: %d\n", ret);
+ }
+
+ priv->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(priv->rx_ch)) {
+ mbox_free_channel(priv->tx_ch);
+ ret = PTR_ERR(priv->rx_ch);
+ return dev_err_probe(cl->dev, ret,
+ "failed to request rx mailbox channel: %d\n", ret);
+ }
+
+ return 0;
+}
+
+static void imx_rproc_free_mbox(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ mbox_free_channel(priv->tx_ch);
+ mbox_free_channel(priv->rx_ch);
+}
+
+static int imx_rproc_detect_mode(struct imx_rproc *priv)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->regmap, dcfg->src_reg, &val);
+ if (ret) {
+ dev_err(dev, "Failed to read src\n");
+ return ret;
+ }
+
+ if (!(val & dcfg->src_stop))
+ priv->rproc->state = RPROC_DETACHED;
+
+ return 0;
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -347,18 +614,32 @@ static int imx_rproc_probe(struct platform_device *pdev)
priv->dev = dev;
dev_set_drvdata(dev, rproc);
+ priv->workqueue = create_workqueue(dev_name(dev));
+ if (!priv->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto err_put_rproc;
+ }
+
+ ret = imx_rproc_xtr_mbox_init(rproc);
+ if (ret)
+ goto err_put_wkq;
ret = imx_rproc_addr_init(priv, pdev);
if (ret) {
dev_err(dev, "failed on imx_rproc_addr_init\n");
- goto err_put_rproc;
+ goto err_put_mbox;
}
+ ret = imx_rproc_detect_mode(priv);
+ if (ret)
+ goto err_put_mbox;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "Failed to get clock\n");
ret = PTR_ERR(priv->clk);
- goto err_put_rproc;
+ goto err_put_mbox;
}
/*
@@ -368,9 +649,11 @@ static int imx_rproc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&rproc->dev, "Failed to enable clock\n");
- goto err_put_rproc;
+ goto err_put_mbox;
}
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
@@ -381,6 +664,10 @@ static int imx_rproc_probe(struct platform_device *pdev)
err_put_clk:
clk_disable_unprepare(priv->clk);
+err_put_mbox:
+ imx_rproc_free_mbox(rproc);
+err_put_wkq:
+ destroy_workqueue(priv->workqueue);
err_put_rproc:
rproc_free(rproc);
@@ -394,6 +681,7 @@ static int imx_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
rproc_del(rproc);
+ imx_rproc_free_mbox(rproc);
rproc_free(rproc);
return 0;
@@ -402,6 +690,8 @@ static int imx_rproc_remove(struct platform_device *pdev)
static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
{ .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
+ { .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
@@ -418,5 +708,5 @@ static struct platform_driver imx_rproc_driver = {
module_platform_driver(imx_rproc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("IMX6SX/7D remote processor control driver");
+MODULE_DESCRIPTION("i.MX remote processor control driver");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index e2618c36eaab..a356738160a4 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -121,7 +121,7 @@ static void ingenic_rproc_kick(struct rproc *rproc, int vqid)
writel(vqid, vpu->aux_base + REG_CORE_MSG);
}
-static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct vpu *vpu = rproc->priv;
void __iomem *va = NULL;
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index cd266163a65f..54781f553f4e 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct keystone_rproc *ksproc = rproc->priv;
void __iomem *va = NULL;
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index ce727598c41c..9679cc26895e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -272,7 +272,7 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* grab the kernel address for this device address */
- ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz);
+ ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
ret = -EINVAL;
@@ -509,7 +509,7 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
return NULL;
}
-static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
@@ -627,7 +627,7 @@ void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
void *ptr;
- ptr = scp_da_to_va(scp->rproc, mem_addr, 0);
+ ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
if (!ptr)
return ERR_PTR(-EINVAL);
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index d94b7391bf9d..43531caa1959 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -728,7 +728,7 @@ out:
* Return: translated virtual address in kernel memory space on success,
* or NULL on failure.
*/
-static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct omap_rproc *oproc = rproc->priv;
int i;
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index dcb380e868df..e5778e476245 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -244,8 +244,8 @@ static int pru_rproc_debug_ss_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
- pru_rproc_debug_ss_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
+ pru_rproc_debug_ss_set, "%llu\n");
/*
* Create PRU-specific debugfs entries
@@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
static void pru_dispose_irq_mapping(struct pru_rproc *pru)
{
- while (pru->evt_count--) {
+ if (!pru->mapped_irq)
+ return;
+
+ while (pru->evt_count) {
+ pru->evt_count--;
if (pru->mapped_irq[pru->evt_count] > 0)
irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
}
kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
}
/*
@@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
struct pru_rproc *pru = rproc->priv;
struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
struct irq_fwspec fwspec;
- struct device_node *irq_parent;
+ struct device_node *parent, *irq_parent;
int i, ret = 0;
/* not having pru_interrupt_map is not an error */
@@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
pru->evt_count = rsc->num_evts;
pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
GFP_KERNEL);
- if (!pru->mapped_irq)
+ if (!pru->mapped_irq) {
+ pru->evt_count = 0;
return -ENOMEM;
+ }
/*
* parse and fill in system event to interrupt channel and
- * channel-to-host mapping
+ * channel-to-host mapping. The interrupt controller to be used
+ * for these mappings for a given PRU remoteproc is always its
+ * corresponding sibling PRUSS INTC node.
*/
- irq_parent = of_irq_find_parent(pru->dev->of_node);
+ parent = of_get_parent(dev_of_node(pru->dev));
+ if (!parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ irq_parent = of_get_child_by_name(parent, "interrupt-controller");
+ of_node_put(parent);
if (!irq_parent) {
kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
return -ENODEV;
}
@@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
if (!pru->mapped_irq[i]) {
- dev_err(dev, "failed to get virq\n");
- ret = pru->mapped_irq[i];
+ dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
+ i, fwspec.param[0], fwspec.param[1],
+ fwspec.param[2]);
+ ret = -EINVAL;
goto map_fail;
}
}
+ of_node_put(irq_parent);
return ret;
map_fail:
pru_dispose_irq_mapping(pru);
+ of_node_put(irq_parent);
return ret;
}
@@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
/* dispose irq mapping - new firmware can provide new mapping */
- if (pru->mapped_irq)
- pru_dispose_irq_mapping(pru);
+ pru_dispose_irq_mapping(pru);
return 0;
}
@@ -483,7 +506,7 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
* core for any PRU client drivers. The PRU Instruction RAM access is restricted
* only to the PRU loader code.
*/
-static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct pru_rproc *pru = rproc->priv;
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e02450225e4a..8b0d8bbacd2e 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -281,7 +281,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 66106ba25ba3..423b31dfa574 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(qproc->dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
if (!ptr) {
dev_err(qproc->dev,
@@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(qproc->dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ ret = -EINVAL;
+ release_firmware(seg_fw);
+ memunmap(ptr);
+ goto release_firmware;
+ }
+
release_firmware(seg_fw);
}
@@ -1661,8 +1679,10 @@ static int q6v5_probe(struct platform_device *pdev)
mba_image = desc->hexagon_mba_image;
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
0, &mba_image);
- if (ret < 0 && ret != -EINVAL)
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mba firmware-name\n");
return ret;
+ }
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
mba_image, sizeof(*qproc));
@@ -1680,8 +1700,10 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->hexagon_mdt_image = "modem.mdt";
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1, &qproc->hexagon_mdt_image);
- if (ret < 0 && ret != -EINVAL)
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
goto free_rproc;
+ }
platform_set_drvdata(pdev, qproc);
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index e635454d6170..b921fc26cd04 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -242,7 +242,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -785,6 +785,22 @@ static const struct adsp_data wcss_resource_init = {
.ssctl_id = 0x12,
};
+static const struct adsp_data sdx55_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x22,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
@@ -797,6 +813,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+ { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 78ebe1168b33..20d50ec7eff1 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -4,13 +4,18 @@
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "qcom_common.h"
@@ -24,6 +29,9 @@
#define Q6SS_GFMUX_CTL_REG 0x020
#define Q6SS_PWR_CTL_REG 0x030
#define Q6SS_MEM_PWR_CTL 0x0B0
+#define Q6SS_STRAP_ACC 0x110
+#define Q6SS_CGC_OVERRIDE 0x034
+#define Q6SS_BCR_REG 0x6000
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -37,14 +45,19 @@
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
+/* Q6SS_BRC_RESET */
+#define Q6SS_BRC_BLK_ARES BIT(0)
+
/* Q6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
+#define Q6SS_SWITCH_CLK_SRC BIT(8)
/* Q6SS_PWR_CTL */
#define Q6SS_L2DATA_STBY_N BIT(18)
#define Q6SS_SLP_RET_N BIT(19)
#define Q6SS_CLAMP_IO BIT(20)
#define QDSS_BHS_ON BIT(21)
+#define QDSS_Q6_MEMORIES GENMASK(15, 0)
/* Q6SS parameters */
#define Q6SS_LDO_BYP BIT(25)
@@ -53,6 +66,7 @@
#define Q6SS_CLAMP_QMC_MEM BIT(22)
#define HALT_CHECK_MAX_LOOPS 200
#define Q6SS_XO_CBCR GENMASK(5, 3)
+#define Q6SS_SLEEP_CBCR GENMASK(5, 2)
/* Q6SS config/status registers */
#define TCSR_GLOBAL_CFG0 0x0
@@ -71,6 +85,25 @@
#define TCSR_WCSS_CLK_MASK 0x1F
#define TCSR_WCSS_CLK_ENABLE 0x14
+#define MAX_HALT_REG 3
+enum {
+ WCSS_IPQ8074,
+ WCSS_QCS404,
+};
+
+struct wcss_data {
+ const char *firmware_name;
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool aon_reset_required;
+ bool wcss_q6_reset_required;
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+ const struct rproc_ops *ops;
+ bool requires_force_stop;
+};
+
struct q6v5_wcss {
struct device *dev;
@@ -82,9 +115,26 @@ struct q6v5_wcss {
u32 halt_wcss;
u32 halt_nc;
+ struct clk *xo;
+ struct clk *ahbfabric_cbcr_clk;
+ struct clk *gcc_abhs_cbcr;
+ struct clk *gcc_axim_cbcr;
+ struct clk *lcc_csr_cbcr;
+ struct clk *ahbs_cbcr;
+ struct clk *tcm_slave_cbcr;
+ struct clk *qdsp6ss_abhm_cbcr;
+ struct clk *qdsp6ss_sleep_cbcr;
+ struct clk *qdsp6ss_axim_cbcr;
+ struct clk *qdsp6ss_xo_cbcr;
+ struct clk *qdsp6ss_core_gfmux;
+ struct clk *lcc_bcr_sleep;
+ struct regulator *cx_supply;
+ struct qcom_sysmon *sysmon;
+
struct reset_control *wcss_aon_reset;
struct reset_control *wcss_reset;
struct reset_control *wcss_q6_reset;
+ struct reset_control *wcss_q6_bcr_reset;
struct qcom_q6v5 q6v5;
@@ -93,6 +143,10 @@ struct q6v5_wcss {
void *mem_region;
size_t mem_size;
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool requires_force_stop;
+
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
};
@@ -237,6 +291,207 @@ wcss_reset:
return ret;
}
+static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret, idx;
+
+ /* Toggle the restart */
+ reset_control_assert(wcss->wcss_reset);
+ usleep_range(200, 300);
+ reset_control_deassert(wcss->wcss_reset);
+ usleep_range(200, 300);
+
+ /* Enable GCC_WDSP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->gcc_abhs_cbcr);
+ if (ret)
+ return ret;
+
+ /* Remove reset to the WCNSS QDSP6SS */
+ reset_control_deassert(wcss->wcss_q6_bcr_reset);
+
+ /* Enable Q6SSTOP_AHBFABRIC_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbfabric_cbcr_clk);
+ if (ret)
+ goto disable_gcc_abhs_cbcr_clk;
+
+ /* Enable the LCCCSR CBC clock, Q6SSTOP_Q6SSTOP_LCC_CSR_CBCR clock */
+ ret = clk_prepare_enable(wcss->lcc_csr_cbcr);
+ if (ret)
+ goto disable_ahbfabric_cbcr_clk;
+
+ /* Enable the Q6AHBS CBC, Q6SSTOP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbs_cbcr);
+ if (ret)
+ goto disable_csr_cbcr_clk;
+
+ /* Enable the TCM slave CBC, Q6SSTOP_Q6SS_TCM_SLAVE_CBCR clock */
+ ret = clk_prepare_enable(wcss->tcm_slave_cbcr);
+ if (ret)
+ goto disable_ahbs_cbcr_clk;
+
+ /* Enable the Q6SS AHB master CBC, Q6SSTOP_Q6SS_AHBM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_abhm_cbcr);
+ if (ret)
+ goto disable_tcm_slave_cbcr_clk;
+
+ /* Enable the Q6SS AXI master CBC, Q6SSTOP_Q6SS_AXIM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_axim_cbcr);
+ if (ret)
+ goto disable_abhm_cbcr_clk;
+
+ /* Enable the Q6SS XO CBC */
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ /* Read CLKOFF bit to go low indicating CLK is enabled */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+
+ writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
+
+ /* Enable the QDSP6 sleep clock */
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ /* Enable the Q6 AXI clock, GCC_WDSP_Q6SS_AXIM_CBCR */
+ ret = clk_prepare_enable(wcss->gcc_axim_cbcr);
+ if (ret)
+ goto disable_sleep_cbcr_clk;
+
+ /* Assert resets, stop core */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Program the QDSP6SS PWR_CTL register */
+ writel(0x01700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03300000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x033C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /*
+ * Enable memories by turning on the QDSP6 memory foot/head switch, one
+ * bank at a time to avoid in-rush current
+ */
+ for (idx = 28; idx >= 0; idx--) {
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) |
+ (1 << idx)), wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ }
+
+ writel(0x031C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ writel(0x030C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Enable the Q6 core clock at the GFM, Q6SSTOP_QDSP6SS_GFMUX_CTL */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* Enable sleep clock branch needed for BCR circuit */
+ ret = clk_prepare_enable(wcss->lcc_bcr_sleep);
+ if (ret)
+ goto disable_core_gfmux_clk;
+
+ return 0;
+
+disable_core_gfmux_clk:
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+disable_sleep_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+disable_abhm_cbcr_clk:
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+disable_tcm_slave_cbcr_clk:
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+disable_ahbs_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+disable_csr_cbcr_clk:
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+disable_ahbfabric_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+disable_gcc_abhs_cbcr_clk:
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ return ret;
+}
+
+static inline int q6v5_wcss_qcs404_reset(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+
+ writel(0x80800000, wcss->reg_base + Q6SS_STRAP_ACC);
+
+ /* Start core execution */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ return 0;
+}
+
+static int q6v5_qcs404_wcss_start(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(wcss->xo);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(wcss->cx_supply);
+ if (ret)
+ goto disable_xo_clk;
+
+ qcom_q6v5_prepare(&wcss->q6v5);
+
+ ret = q6v5_wcss_qcs404_power_on(wcss);
+ if (ret) {
+ dev_err(wcss->dev, "wcss clk_enable failed\n");
+ goto disable_cx_supply;
+ }
+
+ writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
+
+ q6v5_wcss_qcs404_reset(wcss);
+
+ ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "start timed out\n");
+ goto disable_cx_supply;
+ }
+
+ return 0;
+
+disable_cx_supply:
+ regulator_disable(wcss->cx_supply);
+disable_xo_clk:
+ clk_disable_unprepare(wcss->xo);
+
+ return ret;
+}
+
static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
struct regmap *halt_map,
u32 offset)
@@ -271,6 +526,70 @@ static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
+static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret;
+
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
+
+ /* assert clamps to avoid MX current inrush */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= (Q6SS_CLAMP_IO | Q6SS_CLAMP_WL | Q6SS_CLAMP_QMC_MEM);
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Disable memories by turning off memory foot/headswitch */
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) &
+ ~QDSS_Q6_MEMORIES),
+ wcss->reg_base + Q6SS_MEM_PWR_CTL);
+
+ /* Clear the BHS_ON bit */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+ clk_disable_unprepare(wcss->lcc_bcr_sleep);
+
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ ret = reset_control_assert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ ret = reset_control_deassert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+
+ return 0;
+}
+
static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss)
{
int ret;
@@ -390,27 +709,35 @@ static int q6v5_wcss_stop(struct rproc *rproc)
int ret;
/* WCSS powerdown */
- ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
- if (ret == -ETIMEDOUT) {
- dev_err(wcss->dev, "timed out on wait\n");
- return ret;
+ if (wcss->requires_force_stop) {
+ ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "timed out on wait\n");
+ return ret;
+ }
}
- ret = q6v5_wcss_powerdown(wcss);
- if (ret)
- return ret;
-
- /* Q6 Power down */
- ret = q6v5_q6_powerdown(wcss);
- if (ret)
- return ret;
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_qcs404_wcss_shutdown(wcss);
+ if (ret)
+ return ret;
+ } else {
+ ret = q6v5_wcss_powerdown(wcss);
+ if (ret)
+ return ret;
+
+ /* Q6 Power down */
+ ret = q6v5_q6_powerdown(wcss);
+ if (ret)
+ return ret;
+ }
qcom_q6v5_unprepare(&wcss->q6v5);
return 0;
}
-static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct q6v5_wcss *wcss = rproc->priv;
int offset;
@@ -438,7 +765,7 @@ static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
return ret;
}
-static const struct rproc_ops q6v5_wcss_ops = {
+static const struct rproc_ops q6v5_wcss_ipq8074_ops = {
.start = q6v5_wcss_start,
.stop = q6v5_wcss_stop,
.da_to_va = q6v5_wcss_da_to_va,
@@ -446,26 +773,46 @@ static const struct rproc_ops q6v5_wcss_ops = {
.get_boot_addr = rproc_elf_get_boot_addr,
};
-static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
+static const struct rproc_ops q6v5_wcss_qcs404_ops = {
+ .start = q6v5_qcs404_wcss_start,
+ .stop = q6v5_wcss_stop,
+ .da_to_va = q6v5_wcss_da_to_va,
+ .load = q6v5_wcss_load,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .parse_fw = qcom_register_dump_segments,
+};
+
+static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
+ const struct wcss_data *desc)
{
struct device *dev = wcss->dev;
- wcss->wcss_aon_reset = devm_reset_control_get(dev, "wcss_aon_reset");
- if (IS_ERR(wcss->wcss_aon_reset)) {
- dev_err(wcss->dev, "unable to acquire wcss_aon_reset\n");
- return PTR_ERR(wcss->wcss_aon_reset);
+ if (desc->aon_reset_required) {
+ wcss->wcss_aon_reset = devm_reset_control_get_exclusive(dev, "wcss_aon_reset");
+ if (IS_ERR(wcss->wcss_aon_reset)) {
+ dev_err(wcss->dev, "fail to acquire wcss_aon_reset\n");
+ return PTR_ERR(wcss->wcss_aon_reset);
+ }
}
- wcss->wcss_reset = devm_reset_control_get(dev, "wcss_reset");
+ wcss->wcss_reset = devm_reset_control_get_exclusive(dev, "wcss_reset");
if (IS_ERR(wcss->wcss_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_reset\n");
return PTR_ERR(wcss->wcss_reset);
}
- wcss->wcss_q6_reset = devm_reset_control_get(dev, "wcss_q6_reset");
- if (IS_ERR(wcss->wcss_q6_reset)) {
- dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
- return PTR_ERR(wcss->wcss_q6_reset);
+ if (desc->wcss_q6_reset_required) {
+ wcss->wcss_q6_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_reset");
+ if (IS_ERR(wcss->wcss_q6_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
+ return PTR_ERR(wcss->wcss_q6_reset);
+ }
+ }
+
+ wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
+ if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
+ return PTR_ERR(wcss->wcss_q6_bcr_reset);
}
return 0;
@@ -474,35 +821,48 @@ static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
struct platform_device *pdev)
{
- struct of_phandle_args args;
+ unsigned int halt_reg[MAX_HALT_REG] = {0};
+ struct device_node *syscon;
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
- wcss->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wcss->reg_base))
- return PTR_ERR(wcss->reg_base);
+ if (!res)
+ return -EINVAL;
+
+ wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!wcss->reg_base)
+ return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
- wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wcss->rmb_base))
- return PTR_ERR(wcss->rmb_base);
+ if (wcss->version == WCSS_IPQ8074) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
+ wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcss->rmb_base))
+ return PTR_ERR(wcss->rmb_base);
+ }
- ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "qcom,halt-regs", 3, 0, &args);
- if (ret < 0) {
+ syscon = of_parse_phandle(pdev->dev.of_node,
+ "qcom,halt-regs", 0);
+ if (!syscon) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
- wcss->halt_map = syscon_node_to_regmap(args.np);
- of_node_put(args.np);
+ wcss->halt_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(wcss->halt_map))
return PTR_ERR(wcss->halt_map);
- wcss->halt_q6 = args.args[0];
- wcss->halt_wcss = args.args[1];
- wcss->halt_nc = args.args[2];
+ ret = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "qcom,halt-regs",
+ halt_reg, 0,
+ MAX_HALT_REG);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ wcss->halt_q6 = halt_reg[0];
+ wcss->halt_wcss = halt_reg[1];
+ wcss->halt_nc = halt_reg[2];
return 0;
}
@@ -536,14 +896,120 @@ static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
return 0;
}
+static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss)
+{
+ int ret;
+
+ wcss->xo = devm_clk_get(wcss->dev, "xo");
+ if (IS_ERR(wcss->xo)) {
+ ret = PTR_ERR(wcss->xo);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get xo clock");
+ return ret;
+ }
+
+ wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr");
+ if (IS_ERR(wcss->gcc_abhs_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_abhs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc abhs clock");
+ return ret;
+ }
+
+ wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr");
+ if (IS_ERR(wcss->gcc_axim_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc axim clock\n");
+ return ret;
+ }
+
+ wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev,
+ "lcc_ahbfabric_cbc");
+ if (IS_ERR(wcss->ahbfabric_cbcr_clk)) {
+ ret = PTR_ERR(wcss->ahbfabric_cbcr_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbfabric clock\n");
+ return ret;
+ }
+
+ wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc");
+ if (IS_ERR(wcss->lcc_csr_cbcr)) {
+ ret = PTR_ERR(wcss->lcc_csr_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get csr cbcr clk\n");
+ return ret;
+ }
+
+ wcss->ahbs_cbcr = devm_clk_get(wcss->dev,
+ "lcc_abhs_cbc");
+ if (IS_ERR(wcss->ahbs_cbcr)) {
+ ret = PTR_ERR(wcss->ahbs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n");
+ return ret;
+ }
+
+ wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev,
+ "lcc_tcm_slave_cbc");
+ if (IS_ERR(wcss->tcm_slave_cbcr)) {
+ ret = PTR_ERR(wcss->tcm_slave_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get tcm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc");
+ if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get abhm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc");
+ if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get axim cbcr clk\n");
+ return ret;
+ }
+
+ wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep");
+ if (IS_ERR(wcss->lcc_bcr_sleep)) {
+ ret = PTR_ERR(wcss->lcc_bcr_sleep);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get bcr cbcr clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss)
+{
+ wcss->cx_supply = devm_regulator_get(wcss->dev, "cx");
+ if (IS_ERR(wcss->cx_supply))
+ return PTR_ERR(wcss->cx_supply);
+
+ regulator_set_load(wcss->cx_supply, 100000);
+
+ return 0;
+}
+
static int q6v5_wcss_probe(struct platform_device *pdev)
{
+ const struct wcss_data *desc;
struct q6v5_wcss *wcss;
struct rproc *rproc;
int ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_wcss_ops,
- "IPQ8074/q6_fw.mdt", sizeof(*wcss));
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
+ desc->firmware_name, sizeof(*wcss));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -551,6 +1017,10 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
wcss = rproc->priv;
wcss->dev = &pdev->dev;
+ wcss->version = desc->version;
+ wcss->requires_force_stop = desc->requires_force_stop;
ret = q6v5_wcss_init_mmio(wcss, pdev);
if (ret)
@@ -560,17 +1030,33 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- ret = q6v5_wcss_init_reset(wcss);
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_wcss_init_clock(wcss);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_wcss_init_regulator(wcss);
+ if (ret)
+ goto free_rproc;
+ }
+
+ ret = q6v5_wcss_init_reset(wcss, desc);
if (ret)
goto free_rproc;
- ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, WCSS_CRASH_REASON, NULL);
+ ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem,
+ NULL);
if (ret)
goto free_rproc;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
+ if (desc->ssctl_id)
+ wcss->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+
ret = rproc_add(rproc);
if (ret)
goto free_rproc;
@@ -595,8 +1081,31 @@ static int q6v5_wcss_remove(struct platform_device *pdev)
return 0;
}
+static const struct wcss_data wcss_ipq8074_res_init = {
+ .firmware_name = "IPQ8074/q6_fw.mdt",
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .aon_reset_required = true,
+ .wcss_q6_reset_required = true,
+ .ops = &q6v5_wcss_ipq8074_ops,
+ .requires_force_stop = true,
+};
+
+static const struct wcss_data wcss_qcs404_res_init = {
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .firmware_name = "wcnss.mdt",
+ .version = WCSS_QCS404,
+ .aon_reset_required = false,
+ .wcss_q6_reset_required = false,
+ .ssr_name = "mpss",
+ .sysmon_name = "wcnss",
+ .ssctl_id = 0x12,
+ .ops = &q6v5_wcss_qcs404_ops,
+ .requires_force_stop = false,
+};
+
static const struct of_device_id q6v5_wcss_of_match[] = {
- { .compatible = "qcom,ipq8074-wcss-pil" },
+ { .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init },
+ { .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init },
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 2a6a23cb14ca..5f3455aa7e0e 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -320,7 +320,7 @@ static int wcnss_stop(struct rproc *rproc)
return ret;
}
-static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
int offset;
@@ -530,6 +530,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
static int wcnss_probe(struct platform_device *pdev)
{
+ const char *fw_name = WCNSS_FIRMWARE_NAME;
const struct wcnss_data *data;
struct qcom_wcnss *wcnss;
struct resource *res;
@@ -547,8 +548,13 @@ static int wcnss_probe(struct platform_device *pdev)
return -ENXIO;
}
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
- WCNSS_FIRMWARE_NAME, sizeof(*wcnss));
+ fw_name, sizeof(*wcnss));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
index b19ea3057bde..0b8a84c04f76 100644
--- a/drivers/remoteproc/remoteproc_cdev.c
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -32,15 +32,22 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
return -EFAULT;
if (!strncmp(cmd, "start", len)) {
- if (rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING ||
+ rproc->state == RPROC_ATTACHED)
return -EBUSY;
ret = rproc_boot(rproc);
} else if (!strncmp(cmd, "stop", len)) {
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
return -EINVAL;
rproc_shutdown(rproc);
+ } else if (!strncmp(cmd, "detach", len)) {
+ if (rproc->state != RPROC_ATTACHED)
+ return -EINVAL;
+
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognized option\n");
ret = -EINVAL;
@@ -79,11 +86,17 @@ static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned l
static int rproc_cdev_release(struct inode *inode, struct file *filp)
{
struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
- if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+ if (!rproc->cdev_put_on_release)
+ return 0;
+
+ if (rproc->state == RPROC_RUNNING)
rproc_shutdown(rproc);
+ else if (rproc->state == RPROC_ATTACHED)
+ ret = rproc_detach(rproc);
- return 0;
+ return ret;
}
static const struct file_operations rproc_fops = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index ab150765d124..626a6b90fba2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -189,13 +189,13 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* here the output of the DMA API for the carveouts, which should be more
* correct.
*/
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
if (rproc->ops->da_to_va) {
- ptr = rproc->ops->da_to_va(rproc, da, len);
+ ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
if (ptr)
goto out;
}
@@ -217,6 +217,9 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
ptr = carveout->va + offset;
+ if (is_iomem)
+ *is_iomem = carveout->is_iomem;
+
break;
}
@@ -482,7 +485,7 @@ static int copy_dma_range_map(struct device *to, struct device *from)
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
- * @rsc: the vring resource descriptor
+ * @ptr: the vring resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -507,9 +510,10 @@ static int copy_dma_range_map(struct device *to, struct device *from)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
@@ -627,7 +631,7 @@ void rproc_vdev_release(struct kref *ref)
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
- * @rsc: the trace resource descriptor
+ * @ptr: the trace resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -641,9 +645,10 @@ void rproc_vdev_release(struct kref *ref)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
+static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_trace *rsc = ptr;
struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
char name[15];
@@ -693,7 +698,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
/**
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
- * @rsc: the devmem resource entry
+ * @ptr: the devmem resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -716,9 +721,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
*/
-static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
+static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_devmem *rsc = ptr;
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
int ret;
@@ -896,7 +902,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/**
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
- * @rsc: the resource entry
+ * @ptr: the resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
@@ -913,9 +919,9 @@ static int rproc_release_carveout(struct rproc *rproc,
* pressure is important; it may have a substantial impact on performance.
*/
static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc,
- int offset, int avail)
+ void *ptr, int offset, int avail)
{
+ struct fw_rsc_carveout *rsc = ptr;
struct rproc_mem_entry *carveout;
struct device *dev = &rproc->dev;
@@ -1097,10 +1103,10 @@ EXPORT_SYMBOL(rproc_of_parse_firmware);
* enum fw_resource_type.
*/
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
- [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
- [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
- [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
- [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
+ [RSC_CARVEOUT] = rproc_handle_carveout,
+ [RSC_DEVMEM] = rproc_handle_devmem,
+ [RSC_TRACE] = rproc_handle_trace,
+ [RSC_VDEV] = rproc_handle_vdev,
};
/* handle firmware resource entries before booting the remote processor */
@@ -1416,7 +1422,7 @@ reset_table_ptr:
return ret;
}
-static int rproc_attach(struct rproc *rproc)
+static int __rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
@@ -1444,7 +1450,7 @@ static int rproc_attach(struct rproc *rproc)
goto stop_rproc;
}
- rproc->state = RPROC_RUNNING;
+ rproc->state = RPROC_ATTACHED;
dev_info(dev, "remote processor %s is now attached\n", rproc->name);
@@ -1537,11 +1543,149 @@ disable_iommu:
return ret;
}
+static int rproc_set_rsc_table(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+ struct device *dev = &rproc->dev;
+ size_t table_sz;
+ int ret;
+
+ table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
+ if (!table_ptr) {
+ /* Not having a resource table is acceptable */
+ return 0;
+ }
+
+ if (IS_ERR(table_ptr)) {
+ ret = PTR_ERR(table_ptr);
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * If it is possible to detach the remote processor, keep an untouched
+ * copy of the resource table. That way we can start fresh again when
+ * the remote processor is re-attached, that is:
+ *
+ * DETACHED -> ATTACHED -> DETACHED -> ATTACHED
+ *
+ * Free'd in rproc_reset_rsc_table_on_detach() and
+ * rproc_reset_rsc_table_on_stop().
+ */
+ if (rproc->ops->detach) {
+ rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
+ if (!rproc->clean_table)
+ return -ENOMEM;
+ } else {
+ rproc->clean_table = NULL;
+ }
+
+ rproc->cached_table = NULL;
+ rproc->table_ptr = table_ptr;
+ rproc->table_sz = table_sz;
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If we made it to this point a clean_table _must_ have been
+ * allocated in rproc_set_rsc_table(). If one isn't present
+ * something went really wrong and we must complain.
+ */
+ if (WARN_ON(!rproc->clean_table))
+ return -EINVAL;
+
+ /* Remember where the external entity installed the resource table */
+ table_ptr = rproc->table_ptr;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the detach process. The memory
+ * allocated here is free'd in rproc_detach().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * detach process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+
+ /*
+ * Reset the memory area where the firmware loaded the resource table
+ * to its original value. That way when we re-attach the remote
+ * processor the resource table is clean and ready to be used again.
+ */
+ memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
+
+ /*
+ * The clean resource table is no longer needed. Allocated in
+ * rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
+{
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If a cache table exists the remote processor was started by
+ * the remoteproc core. That cache table should be used for
+ * the rest of the shutdown process.
+ */
+ if (rproc->cached_table)
+ goto out;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is free'd in rproc_shutdown().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Since the remote processor is being switched off the clean table
+ * won't be needed. Allocated in rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+out:
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+ return 0;
+}
+
/*
* Attach to remote processor - similar to rproc_fw_boot() but without
* the steps that deal with the firmware image.
*/
-static int rproc_actuate(struct rproc *rproc)
+static int rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
@@ -1556,6 +1700,19 @@ static int rproc_actuate(struct rproc *rproc)
return ret;
}
+ /* Do anything that is needed to boot the remote processor */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
+ ret = rproc_set_rsc_table(rproc);
+ if (ret) {
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ goto unprepare_device;
+ }
+
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -1570,7 +1727,7 @@ static int rproc_actuate(struct rproc *rproc)
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto disable_iommu;
+ goto unprepare_device;
}
/* Allocate carveout resources associated to rproc */
@@ -1581,7 +1738,7 @@ static int rproc_actuate(struct rproc *rproc)
goto clean_up_resources;
}
- ret = rproc_attach(rproc);
+ ret = __rproc_attach(rproc);
if (ret)
goto clean_up_resources;
@@ -1589,6 +1746,9 @@ static int rproc_actuate(struct rproc *rproc)
clean_up_resources:
rproc_resource_cleanup(rproc);
+unprepare_device:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -1642,11 +1802,20 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
struct device *dev = &rproc->dev;
int ret;
+ /* No need to continue if a stop() operation has not been provided */
+ if (!rproc->ops->stop)
+ return -EINVAL;
+
/* Stop any subdevices for the remote processor */
rproc_stop_subdevices(rproc, crashed);
/* the installed resource table is no longer accessible */
- rproc->table_ptr = rproc->cached_table;
+ ret = rproc_reset_rsc_table_on_stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
/* power off the remote processor */
ret = rproc->ops->stop(rproc);
@@ -1659,19 +1828,48 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
rproc->state = RPROC_OFFLINE;
- /*
- * The remote processor has been stopped and is now offline, which means
- * that the next time it is brought back online the remoteproc core will
- * be responsible to load its firmware. As such it is no longer
- * autonomous.
- */
- rproc->autonomous = false;
-
dev_info(dev, "stopped remote processor %s\n", rproc->name);
return 0;
}
+/*
+ * __rproc_detach(): Does the opposite of __rproc_attach()
+ */
+static int __rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* No need to continue if a detach() operation has not been provided */
+ if (!rproc->ops->detach)
+ return -EINVAL;
+
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, false);
+
+ /* the installed resource table is no longer accessible */
+ ret = rproc_reset_rsc_table_on_detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell the remote processor the core isn't available anymore */
+ ret = rproc->ops->detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't detach from rproc: %d\n", ret);
+ return ret;
+ }
+
+ rproc_unprepare_subdevices(rproc);
+
+ rproc->state = RPROC_DETACHED;
+
+ dev_info(dev, "detached remote processor %s\n", rproc->name);
+
+ return 0;
+}
/**
* rproc_trigger_recovery() - recover a remoteproc
@@ -1802,7 +2000,7 @@ int rproc_boot(struct rproc *rproc)
if (rproc->state == RPROC_DETACHED) {
dev_info(dev, "attaching to %s\n", rproc->name);
- ret = rproc_actuate(rproc);
+ ret = rproc_attach(rproc);
} else {
dev_info(dev, "powering up %s\n", rproc->name);
@@ -1885,6 +2083,65 @@ out:
EXPORT_SYMBOL(rproc_shutdown);
/**
+ * rproc_detach() - Detach the remote processor from the
+ * remoteproc core
+ *
+ * @rproc: the remote processor
+ *
+ * Detach a remote processor (previously attached to with rproc_attach()).
+ *
+ * If @rproc is still in use by additional users, this function will
+ * just decrement the power refcount and exit, without disconnecting
+ * the device.
+ *
+ * Function rproc_detach() calls __rproc_detach() in order to let a remote
+ * processor know that services provided by the application processor are
+ * no longer available. From there it should be possible to remove the
+ * platform driver and even power cycle the application processor (if the HW
+ * supports it) without needing to switch off the remote processor.
+ */
+int rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __rproc_detach(rproc);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_detach);
+
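A hypothetical consumer of the new export; only rproc_boot() and rproc_detach() are real API here, the surrounding driver and the way the handle was obtained are assumptions:

#include <linux/remoteproc.h>

/*
 * Sketch: attach to an already-running coprocessor, talk to it, then
 * hand it back without powering it off. The rproc handle is assumed
 * to have been obtained elsewhere (e.g. by phandle).
 */
static int example_use_then_detach(struct rproc *rproc)
{
	int ret;

	/* attaches rather than boots when state == RPROC_DETACHED */
	ret = rproc_boot(rproc);
	if (ret)
		return ret;

	/* ... exchange messages with the coprocessor ... */

	/* the coprocessor keeps running on its own after this */
	return rproc_detach(rproc);
}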
+/**
* rproc_get_by_phandle() - find a remote processor by phandle
* @phandle: phandle to the rproc
*
@@ -2077,16 +2334,6 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
- /*
- * Remind ourselves the remote processor has been attached to rather
- * than booted by the remoteproc core. This is important because the
- * RPROC_DETACHED state will be lost as soon as the remote processor
- * has been attached to. Used in firmware_show() and reset in
- * rproc_stop().
- */
- if (rproc->state == RPROC_DETACHED)
- rproc->autonomous = true;
-
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
@@ -2347,10 +2594,8 @@ int rproc_del(struct rproc *rproc)
if (!rproc)
return -EINVAL;
- /* if rproc is marked always-on, rproc_add() booted it */
/* TODO: make sure this works with rproc->power > 1 */
- if (rproc->auto_boot)
- rproc_shutdown(rproc);
+ rproc_shutdown(rproc);
mutex_lock(&rproc->lock);
rproc->state = RPROC_DELETED;
@@ -2492,7 +2737,11 @@ static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
rcu_read_lock();
list_for_each_entry_rcu(rproc, &rproc_list, node) {
- if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
+ if (!rproc->ops->panic)
+ continue;
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
continue;
d = rproc->ops->panic(rproc);
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index 81ec154a6a5e..aee657cc08c6 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -153,18 +153,22 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest,
size_t offset, size_t size)
{
void *ptr;
+ bool is_iomem;
if (segment->dump) {
segment->dump(rproc, segment, dest, offset, size);
} else {
- ptr = rproc_da_to_va(rproc, segment->da + offset, size);
+ ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
if (!ptr) {
dev_err(&rproc->dev,
"invalid copy request for segment %pad with offset %zu and size %zu)\n",
&segment->da, offset, size);
memset(dest, 0xff, size);
} else {
- memcpy(dest, ptr, size);
+ if (is_iomem)
+ memcpy_fromio(dest, ptr, size);
+ else
+ memcpy(dest, ptr, size);
}
}
}
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 7e5845376e9f..b5a1e3b697d9 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -132,7 +132,7 @@ static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
char buf[100];
int len;
- va = rproc_da_to_va(data->rproc, trace->da, trace->len);
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL);
if (!va) {
len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index df68d87752e4..11423588965a 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -175,6 +175,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
u64 offset = elf_phdr_get_p_offset(class, phdr);
u32 type = elf_phdr_get_p_type(class, phdr);
void *ptr;
+ bool is_iomem;
if (type != PT_LOAD)
continue;
@@ -204,7 +205,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* grab the kernel address for this device address */
- ptr = rproc_da_to_va(rproc, da, memsz);
+ ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem);
if (!ptr) {
dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
memsz);
@@ -213,8 +214,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
}
/* put the segment where the remote processor expects it */
- if (filesz)
- memcpy(ptr, elf_data + offset, filesz);
+ if (filesz) {
+ if (is_iomem)
+ memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
+ else
+ memcpy(ptr, elf_data + offset, filesz);
+ }
/*
* Zero out remaining memory for this segment.
@@ -223,8 +228,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
* did this for us. albeit harmless, we may consider removing
* this.
*/
- if (memsz > filesz)
- memset(ptr + filesz, 0, memsz - filesz);
+ if (memsz > filesz) {
+ if (is_iomem)
+ memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz);
+ else
+ memset(ptr + filesz, 0, memsz - filesz);
+ }
}
return ret;
@@ -377,6 +386,6 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
- return rproc_da_to_va(rproc, sh_addr, sh_size);
+ return rproc_da_to_va(rproc, sh_addr, sh_size, NULL);
}
EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
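The copy direction matters in these two files: in the loader, ptr is the carveout being written (an __iomem destination when is_iomem is set) while elf_data sits in ordinary kernel memory, hence memcpy_toio(); the coredump path is the mirror image and reads out of device memory with memcpy_fromio(). A minimal sketch of the two directions, using the <linux/io.h> helpers:

#include <linux/io.h>
#include <linux/string.h>

/* load: firmware image (RAM) --> carveout (device memory) */
static inline void load_chunk(void __iomem *carveout, const void *fw,
			      size_t len)
{
	memcpy_toio(carveout, fw, len);
}

/* coredump: carveout (device memory) --> dump buffer (RAM) */
static inline void dump_chunk(void *dump, const void __iomem *carveout,
			      size_t len)
{
	memcpy_fromio(dump, carveout, len);
}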
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index c34002888d2c..a328e634b1de 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -84,7 +84,7 @@ static inline void rproc_char_device_remove(struct rproc *rproc)
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
@@ -178,6 +178,16 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
}
static inline
+struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ if (rproc->ops->get_loaded_rsc_table)
+ return rproc->ops->get_loaded_rsc_table(rproc, size);
+
+ return NULL;
+}
+
+static inline
bool rproc_u64_fit_in_size_t(u64 val)
{
if (sizeof(size_t) == sizeof(u64))
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 1dbef895e65e..ea8b89f97d7b 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -15,7 +15,7 @@ static ssize_t recovery_show(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
+ return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
}
/*
@@ -82,7 +82,7 @@ static ssize_t coredump_show(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
+ return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
}
/*
@@ -138,11 +138,8 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
* If the remote processor has been started by an external
* entity we have no idea of what image it is running. As such
* simply display a generic string rather than rproc->firmware.
- *
- * Here we rely on the autonomous flag because a remote processor
- * may have been attached to and currently in a running state.
*/
- if (rproc->autonomous)
+ if (rproc->state == RPROC_ATTACHED)
firmware = "unknown";
return sprintf(buf, "%s\n", firmware);
@@ -172,6 +169,7 @@ static const char * const rproc_state_string[] = {
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
+ [RPROC_ATTACHED] = "attached",
[RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
@@ -196,17 +194,24 @@ static ssize_t state_store(struct device *dev,
int ret = 0;
if (sysfs_streq(buf, "start")) {
- if (rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING ||
+ rproc->state == RPROC_ATTACHED)
return -EBUSY;
ret = rproc_boot(rproc);
if (ret)
dev_err(&rproc->dev, "Boot failed: %d\n", ret);
} else if (sysfs_streq(buf, "stop")) {
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
return -EINVAL;
rproc_shutdown(rproc);
+ } else if (sysfs_streq(buf, "detach")) {
+ if (rproc->state != RPROC_ATTACHED)
+ return -EINVAL;
+
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
ret = -EINVAL;
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 09bcb4d8b9e0..22096adc1ad3 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index ccb3c14a0023..7353f9e7e7af 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -28,7 +28,7 @@
#define RELEASE_BOOT 1
#define MBOX_NB_VQ 2
-#define MBOX_NB_MBX 3
+#define MBOX_NB_MBX 4
#define STM32_SMC_RCC 0x82001000
#define STM32_SMC_REG_WRITE 0x1
@@ -38,6 +38,7 @@
#define STM32_MBX_VQ1 "vq1"
#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
+#define STM32_MBX_DETACH "detach"
#define RSC_TBL_SIZE 1024
@@ -207,16 +208,7 @@ static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
return -EINVAL;
}
-static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
- const struct firmware *fw)
-{
- if (rproc_elf_load_rsc_table(rproc, fw))
- dev_warn(&rproc->dev, "no resource table found for this firmware\n");
-
- return 0;
-}
-
-static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
+static int stm32_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
@@ -274,12 +266,10 @@ static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
- int ret = stm32_rproc_parse_memory_regions(rproc);
-
- if (ret)
- return ret;
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
- return stm32_rproc_elf_load_rsc_table(rproc, fw);
+ return 0;
}
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
@@ -347,6 +337,15 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
.tx_done = NULL,
.tx_tout = 500, /* 500 ms time out */
},
+ },
+ {
+ .name = STM32_MBX_DETACH,
+ .vq_id = -1,
+ .client = {
+ .tx_block = true,
+ .tx_done = NULL,
+ .tx_tout = 200, /* 200 ms timeout for the detach request should be fair enough */
+ },
}
};
@@ -472,6 +471,25 @@ static int stm32_rproc_attach(struct rproc *rproc)
return stm32_rproc_set_hold_boot(rproc, true);
}
+static int stm32_rproc_detach(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, dummy_data, idx;
+
+ /* Inform the remote processor of the detach */
+ idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH);
+ if (idx >= 0 && ddata->mb[idx].chan) {
+ /* A dummy data is sent to allow to block on transmit */
+ err = mbox_send_message(ddata->mb[idx].chan,
+ &dummy_data);
+ if (err < 0)
+ dev_warn(&rproc->dev, "warning: remote FW detach without ack\n");
+ }
+
+ /* Allow remote processor to auto-reboot */
+ return stm32_rproc_set_hold_boot(rproc, false);
+}
+
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
@@ -546,14 +564,89 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
}
}
+static int stm32_rproc_da_to_pa(struct rproc *rproc,
+ u64 da, phys_addr_t *pa)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct stm32_rproc_mem *p_mem;
+ unsigned int i;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (da < p_mem->dev_addr ||
+ da >= p_mem->dev_addr + p_mem->size)
+ continue;
+
+ *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+ dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
+
+ return 0;
+ }
+
+ dev_err(dev, "can't translate da %llx\n", da);
+
+ return -EINVAL;
+}
+
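The translation above is a plain window walk: a device address is valid if it falls inside one of the declared ranges and maps through a constant offset. A standalone model with made-up window values:

#include <stdint.h>
#include <stdio.h>

struct mem_window { uint64_t dev_addr, bus_addr, size; };

static int da_to_pa(const struct mem_window *w, int n, uint64_t da,
		    uint64_t *pa)
{
	for (int i = 0; i < n; i++) {
		if (da < w[i].dev_addr || da >= w[i].dev_addr + w[i].size)
			continue;
		*pa = da - w[i].dev_addr + w[i].bus_addr;
		return 0;
	}
	return -1;
}

int main(void)
{
	/* hypothetical coprocessor SRAM alias */
	struct mem_window w[] = {
		{ 0x10000000, 0x30000000, 0x40000 },
	};
	uint64_t pa;

	if (!da_to_pa(w, 1, 0x10000100, &pa))
		printf("pa = 0x%llx\n", (unsigned long long)pa); /* 0x30000100 */
	return 0;
}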
+static struct resource_table *
+stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ phys_addr_t rsc_pa;
+ u32 rsc_da;
+ int err;
+
+ /* The resource table has already been mapped, nothing to do */
+ if (ddata->rsc_va)
+ goto done;
+
+ err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+ if (err) {
+ dev_err(dev, "failed to read rsc tbl addr\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!rsc_da)
+ /* no rsc table */
+ return ERR_PTR(-ENOENT);
+
+ err = stm32_rproc_da_to_pa(rproc, rsc_da, &rsc_pa);
+ if (err)
+ return ERR_PTR(err);
+
+ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &rsc_pa, RSC_TBL_SIZE);
+ ddata->rsc_va = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+done:
+ /*
+ * Assuming the resource table fits in 1 kB is fair.
+ * Note that for detach to work, this 1 kB memory area has to be
+ * reserved in the coprocessor firmware for the resource table,
+ * since on detach the remoteproc core re-initializes the entire
+ * area by overwriting it with the initial values stored in
+ * rproc->clean_table.
+ */
+ *table_sz = RSC_TBL_SIZE;
+ return (struct resource_table *)ddata->rsc_va;
+}
+
static const struct rproc_ops st_rproc_ops = {
+ .prepare = stm32_rproc_prepare,
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
.attach = stm32_rproc_attach,
+ .detach = stm32_rproc_detach,
.kick = stm32_rproc_kick,
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
@@ -695,75 +788,6 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
}
-static int stm32_rproc_da_to_pa(struct platform_device *pdev,
- struct stm32_rproc *ddata,
- u64 da, phys_addr_t *pa)
-{
- struct device *dev = &pdev->dev;
- struct stm32_rproc_mem *p_mem;
- unsigned int i;
-
- for (i = 0; i < ddata->nb_rmems; i++) {
- p_mem = &ddata->rmems[i];
-
- if (da < p_mem->dev_addr ||
- da >= p_mem->dev_addr + p_mem->size)
- continue;
-
- *pa = da - p_mem->dev_addr + p_mem->bus_addr;
- dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
-
- return 0;
- }
-
- dev_err(dev, "can't translate da %llx\n", da);
-
- return -EINVAL;
-}
-
-static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev,
- struct rproc *rproc,
- struct stm32_rproc *ddata)
-{
- struct device *dev = &pdev->dev;
- phys_addr_t rsc_pa;
- u32 rsc_da;
- int err;
-
- err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
- if (err) {
- dev_err(dev, "failed to read rsc tbl addr\n");
- return err;
- }
-
- if (!rsc_da)
- /* no rsc table */
- return 0;
-
- err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa);
- if (err)
- return err;
-
- ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
- if (IS_ERR_OR_NULL(ddata->rsc_va)) {
- dev_err(dev, "Unable to map memory region: %pa+%zx\n",
- &rsc_pa, RSC_TBL_SIZE);
- ddata->rsc_va = NULL;
- return -ENOMEM;
- }
-
- /*
- * The resource table is already loaded in device memory, no need
- * to work with a cached table.
- */
- rproc->cached_table = NULL;
- /* Assuming the resource table fits in 1kB is fair */
- rproc->table_sz = RSC_TBL_SIZE;
- rproc->table_ptr = (struct resource_table *)ddata->rsc_va;
-
- return 0;
-}
-
static int stm32_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -797,18 +821,9 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- if (state == M4_STATE_CRUN) {
+ if (state == M4_STATE_CRUN)
rproc->state = RPROC_DETACHED;
- ret = stm32_rproc_parse_memory_regions(rproc);
- if (ret)
- goto free_resources;
-
- ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata);
- if (ret)
- goto free_resources;
- }
-
rproc->has_iommu = false;
ddata->workqueue = create_workqueue(dev_name(dev));
if (!ddata->workqueue) {
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 863c0214e0a8..fd4eb67a6681 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -354,7 +354,7 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct k3_dsp_rproc *kproc = rproc->priv;
void __iomem *va = NULL;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 62b5a4c29456..5cf8d030a1f0 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -590,7 +590,7 @@ out:
* present in a DSP or IPU device). The translated addresses can be used
* either by the remoteproc core for loading, or by any rpmsg bus drivers.
*/
-static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_core *core = kproc->core;
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 92d387dfc03b..484f7605823e 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -89,7 +89,7 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
return error;
}
-static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
void *va = NULL;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 7043c7f6dcf0..3e7f55e44d84 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -197,6 +197,7 @@ config RESET_SIMPLE
- RCC reset controller in STM32 MCUs
- Allwinner SoCs
- ZTE's zx2967 family
+ - SiFive FU740 SoCs
config RESET_STM32MP157
bool "STM32MP157 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index dbf881b586d9..71c1c8264b2d 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -359,6 +359,30 @@ int reset_control_reset(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_reset);
/**
+ * reset_control_bulk_reset - reset the controlled devices in order
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Issue a reset on all provided reset controls, in order.
+ *
+ * See also: reset_control_reset()
+ */
+int reset_control_bulk_reset(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ ret = reset_control_reset(rstcs[i].rstc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_reset);
+
+/**
* reset_control_rearm - allow shared reset line to be re-triggered
* @rstc: reset controller
*
@@ -462,6 +486,36 @@ int reset_control_assert(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_assert);
/**
+ * reset_control_bulk_assert - asserts the reset lines in order
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Assert the reset lines for all provided reset controls, in order.
+ * If an assertion fails, already asserted resets are deasserted again.
+ *
+ * See also: reset_control_assert()
+ */
+int reset_control_bulk_assert(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ ret = reset_control_assert(rstcs[i].rstc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (i--)
+ reset_control_deassert(rstcs[i].rstc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_assert);
+
+/**
* reset_control_deassert - deasserts the reset line
* @rstc: reset controller
*
@@ -512,6 +566,36 @@ int reset_control_deassert(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_deassert);
/**
+ * reset_control_bulk_deassert - deasserts the reset lines in reverse order
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Deassert the reset lines for all provided reset controls, in reverse order.
+ * If a deassertion fails, already deasserted resets are asserted again.
+ *
+ * See also: reset_control_deassert()
+ */
+int reset_control_bulk_deassert(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = num_rstcs - 1; i >= 0; i--) {
+ ret = reset_control_deassert(rstcs[i].rstc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (i < num_rstcs)
+ reset_control_assert(rstcs[i++].rstc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);
+
+/**
* reset_control_status - returns a negative errno if not supported, a
* positive value if the reset line is asserted, or zero if the reset
* line is not asserted or if the desc is NULL (optional reset).
@@ -589,6 +673,36 @@ int reset_control_acquire(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_acquire);
/**
+ * reset_control_bulk_acquire - acquires reset controls for exclusive use
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * This is used to explicitly acquire reset controls requested with
+ * reset_control_bulk_get_exclusive_release() for temporary exclusive use.
+ *
+ * See also: reset_control_acquire(), reset_control_bulk_release()
+ */
+int reset_control_bulk_acquire(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ ret = reset_control_acquire(rstcs[i].rstc);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (i--)
+ reset_control_release(rstcs[i].rstc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);
+
+/**
* reset_control_release() - releases exclusive access to a reset control
* @rstc: reset control
*
@@ -610,6 +724,26 @@ void reset_control_release(struct reset_control *rstc)
}
EXPORT_SYMBOL_GPL(reset_control_release);
+/**
+ * reset_control_bulk_release() - releases exclusive access to reset controls
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ *
+ * Releases exclusive access right to reset controls previously obtained by a
+ * call to reset_control_bulk_acquire().
+ *
+ * See also: reset_control_release(), reset_control_bulk_acquire()
+ */
+void reset_control_bulk_release(int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ int i;
+
+ for (i = 0; i < num_rstcs; i++)
+ reset_control_release(rstcs[i].rstc);
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_release);
+
static struct reset_control *__reset_control_get_internal(
struct reset_controller_dev *rcdev,
unsigned int index, bool shared, bool acquired)
@@ -814,6 +948,32 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
}
EXPORT_SYMBOL_GPL(__reset_control_get);
+int __reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ int ret, i;
+
+ for (i = 0; i < num_rstcs; i++) {
+ rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
+ shared, optional, acquired);
+ if (IS_ERR(rstcs[i].rstc)) {
+ ret = PTR_ERR(rstcs[i].rstc);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ mutex_lock(&reset_list_mutex);
+ while (i--)
+ __reset_control_put_internal(rstcs[i].rstc);
+ mutex_unlock(&reset_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__reset_control_bulk_get);
+
static void reset_control_array_put(struct reset_control_array *resets)
{
int i;
@@ -845,6 +1005,23 @@ void reset_control_put(struct reset_control *rstc)
}
EXPORT_SYMBOL_GPL(reset_control_put);
+/**
+ * reset_control_bulk_put - free the reset controllers
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset controls set
+ */
+void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ mutex_lock(&reset_list_mutex);
+ while (num_rstcs--) {
+ if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
+ continue;
+ __reset_control_put_internal(rstcs[num_rstcs].rstc);
+ }
+ mutex_unlock(&reset_list_mutex);
+}
+EXPORT_SYMBOL_GPL(reset_control_bulk_put);
+
static void devm_reset_control_release(struct device *dev, void *res)
{
reset_control_put(*(struct reset_control **)res);
@@ -874,6 +1051,44 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
+struct reset_control_bulk_devres {
+ int num_rstcs;
+ struct reset_control_bulk_data *rstcs;
+};
+
+static void devm_reset_control_bulk_release(struct device *dev, void *res)
+{
+ struct reset_control_bulk_devres *devres = res;
+
+ reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
+}
+
+int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ struct reset_control_bulk_devres *ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ ptr->num_rstcs = num_rstcs;
+ ptr->rstcs = rstcs;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
+
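A hypothetical consumer of the bulk API, assuming the usual inline wrappers (such as devm_reset_control_bulk_get_exclusive()) that the <linux/reset.h> side of this change provides; the "foo" names are placeholders:

#include <linux/device.h>
#include <linux/reset.h>

#define FOO_NUM_RESETS 3

static int foo_probe_resets(struct device *dev)
{
	struct reset_control_bulk_data resets[FOO_NUM_RESETS] = {
		{ .id = "core" }, { .id = "bus" }, { .id = "phy" },
	};
	int ret;

	/* one devres-managed lookup for all three lines */
	ret = devm_reset_control_bulk_get_exclusive(dev, FOO_NUM_RESETS,
						    resets);
	if (ret)
		return ret;

	/*
	 * Deasserted in reverse array order; if one fails, the core
	 * re-asserts the ones it already deasserted.
	 */
	return reset_control_bulk_deassert(FOO_NUM_RESETS, resets);
}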
/**
* __device_reset - find reset controller associated with the device
* and perform reset
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 27a05167c18c..05533c71b10e 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
dev_err(glink->dev,
"no intent found for channel %s intent %d",
channel->name, liid);
+ ret = -ENOENT;
goto advance_rx;
}
}
@@ -1332,6 +1333,20 @@ static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_glink_send(channel, data, len, false);
}
+static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+
+ return __qcom_glink_send(channel, data, len, true);
+}
+
+static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+
+ return __qcom_glink_send(channel, data, len, false);
+}
+
/*
* Finds the device_node for the glink child interested in this channel.
*/
@@ -1364,7 +1379,9 @@ static const struct rpmsg_device_ops glink_device_ops = {
static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
.destroy_ept = qcom_glink_destroy_ept,
.send = qcom_glink_send,
+ .sendto = qcom_glink_sendto,
.trysend = qcom_glink_trysend,
+ .trysendto = qcom_glink_trysendto,
};
static void qcom_glink_rpdev_release(struct device *dev)
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 19903de6268d..8da1b5cb31b3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -974,6 +974,20 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_smd_send(qsept->qsch, data, len, false);
}
+static int qcom_smd_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+
+ return __qcom_smd_send(qsept->qsch, data, len, true);
+}
+
+static int qcom_smd_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+
+ return __qcom_smd_send(qsept->qsch, data, len, false);
+}
+
static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
@@ -1038,7 +1052,9 @@ static const struct rpmsg_device_ops qcom_smd_device_ops = {
static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
.destroy_ept = qcom_smd_destroy_ept,
.send = qcom_smd_send,
+ .sendto = qcom_smd_sendto,
.trysend = qcom_smd_trysend,
+ .trysendto = qcom_smd_trysendto,
.poll = qcom_smd_poll,
};
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 4bbbacdbf3bb..2bebc9b2d163 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -127,6 +127,9 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
struct rpmsg_device *rpdev = eptdev->rpdev;
struct device *dev = &eptdev->dev;
+ if (eptdev->ept)
+ return -EBUSY;
+
get_device(dev);
ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);
@@ -239,9 +242,9 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
}
if (filp->f_flags & O_NONBLOCK)
- ret = rpmsg_trysend(eptdev->ept, kbuf, len);
+ ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
else
- ret = rpmsg_send(eptdev->ept, kbuf, len);
+ ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
unlock_eptdev:
mutex_unlock(&eptdev->ept_lock);
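Seen from user space, the change above means a write() on an endpoint's char device now carries the channel's destination address, and O_NONBLOCK picks the non-sleeping trysendto path. A small sketch; the device node name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char msg[] = "ping";
	/* one /dev/rpmsgN node exists per endpoint */
	int fd = open("/dev/rpmsg0", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;

	/* O_NONBLOCK: trysendto() path, fails fast when no buffer is free */
	if (write(fd, msg, sizeof(msg)) < 0)
		perror("write");

	close(fd);
	return 0;
}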
@@ -543,7 +546,7 @@ static struct rpmsg_driver rpmsg_chrdev_driver = {
},
};
-static int rpmsg_char_init(void)
+static int rpmsg_chrdev_init(void)
{
int ret;
@@ -569,7 +572,7 @@ static int rpmsg_char_init(void)
return ret;
}
-postcore_initcall(rpmsg_char_init);
+postcore_initcall(rpmsg_chrdev_init);
static void rpmsg_chrdev_exit(void)
{
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index e87d4cf926eb..8e49a3bacfc7 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -813,14 +813,57 @@ static void rpmsg_xmit_done(struct virtqueue *svq)
wake_up_interruptible(&vrp->sendq);
}
+/*
+ * Called to expose to user space a /dev/rpmsg_ctrlX interface allowing
+ * the creation of endpoint-to-endpoint communication without an
+ * associated RPMsg channel. The endpoints are attached to the ctrldev
+ * RPMsg device.
+ */
+static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev)
+{
+ struct virtproc_info *vrp = vdev->priv;
+ struct virtio_rpmsg_channel *vch;
+ struct rpmsg_device *rpdev_ctrl;
+ int err = 0;
+
+ vch = kzalloc(sizeof(*vch), GFP_KERNEL);
+ if (!vch)
+ return ERR_PTR(-ENOMEM);
+
+ /* Link the channel to the vrp */
+ vch->vrp = vrp;
+
+ /* Assign public information to the rpmsg_device */
+ rpdev_ctrl = &vch->rpdev;
+ rpdev_ctrl->ops = &virtio_rpmsg_ops;
+
+ rpdev_ctrl->dev.parent = &vrp->vdev->dev;
+ rpdev_ctrl->dev.release = virtio_rpmsg_release_device;
+ rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev);
+
+ err = rpmsg_chrdev_register_device(rpdev_ctrl);
+ if (err) {
+ kfree(vch);
+ return ERR_PTR(err);
+ }
+
+ return rpdev_ctrl;
+}
+
+static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl)
+{
+ if (!rpdev_ctrl)
+ return;
+ kfree(to_virtio_rpmsg_channel(rpdev_ctrl));
+}
+
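The control device registered above is what lets user space create raw endpoints that have no backing RPMsg channel, through the existing rpmsg_char uapi. A sketch; the node name, endpoint name and addresses are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rpmsg.h>

int main(void)
{
	struct rpmsg_endpoint_info info = { .src = 42, .dst = 0x400 };
	int fd = open("/dev/rpmsg_ctrl0", O_RDWR);

	if (fd < 0)
		return 1;

	strncpy(info.name, "raw-ept", sizeof(info.name) - 1);

	if (ioctl(fd, RPMSG_CREATE_EPT_IOCTL, &info) < 0)
		perror("RPMSG_CREATE_EPT_IOCTL");

	/* on success a new /dev/rpmsgN node fronts the endpoint */
	close(fd);
	return 0;
}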
static int rpmsg_probe(struct virtio_device *vdev)
{
vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
static const char * const names[] = { "input", "output" };
struct virtqueue *vqs[2];
struct virtproc_info *vrp;
- struct virtio_rpmsg_channel *vch;
- struct rpmsg_device *rpdev_ns;
+ struct virtio_rpmsg_channel *vch = NULL;
+ struct rpmsg_device *rpdev_ns, *rpdev_ctrl;
void *bufs_va;
int err = 0, i;
size_t total_buf_space;
@@ -894,12 +937,18 @@ static int rpmsg_probe(struct virtio_device *vdev)
vdev->priv = vrp;
+ rpdev_ctrl = rpmsg_virtio_add_ctrl_dev(vdev);
+ if (IS_ERR(rpdev_ctrl)) {
+ err = PTR_ERR(rpdev_ctrl);
+ goto free_coherent;
+ }
+
/* if supported by the remote processor, enable the name service */
if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
vch = kzalloc(sizeof(*vch), GFP_KERNEL);
if (!vch) {
err = -ENOMEM;
- goto free_coherent;
+ goto free_ctrldev;
}
/* Link the channel to our vrp */
@@ -915,7 +964,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
err = rpmsg_ns_register_device(rpdev_ns);
if (err)
- goto free_coherent;
+ goto free_vch;
}
/*
@@ -939,8 +988,11 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
-free_coherent:
+free_vch:
kfree(vch);
+free_ctrldev:
+ rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
+free_coherent:
dma_free_coherent(vdev->dev.parent, total_buf_space,
bufs_va, vrp->bufs_dma);
vqs_del:
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 773386b4aeb7..d8c13fded164 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1339,6 +1339,7 @@ config RTC_DRV_DIGICOLOR
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
depends on ARCH_MXC
+ depends on OF
help
Support for Freescale IMX DryIce RTC
@@ -1906,7 +1907,7 @@ config RTC_DRV_HID_SENSOR_TIME
config RTC_DRV_GOLDFISH
tristate "Goldfish Real Time Clock"
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
help
Say yes to enable RTC driver for the Goldfish based virtual platform.
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index dcb34c73319e..9a2bd4947007 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int rc = 0, err;
+ int err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
@@ -561,17 +561,21 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (rtc->uie_rtctimer.enabled == enabled)
goto out;
- if (rtc->uie_unsupported) {
- err = -EINVAL;
- goto out;
+ if (rtc->uie_unsupported || !test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ return rtc_dev_update_irq_enable_emul(rtc, enabled);
+#else
+ return -EINVAL;
+#endif
}
if (enabled) {
struct rtc_time tm;
ktime_t now, onesec;
- rc = __rtc_read_time(rtc, &tm);
- if (rc)
+ err = __rtc_read_time(rtc, &tm);
+ if (err)
goto out;
onesec = ktime_set(1, 0);
now = rtc_tm_to_ktime(tm);
@@ -585,24 +589,6 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
- /*
- * __rtc_read_time() failed, this probably means that the RTC time has
- * never been set or less probably there is a transient error on the
- * bus. In any case, avoid enabling emulation has this will fail when
- * reading the time too.
- */
- if (rc)
- return rc;
-
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
- /*
- * Enable emulation if the driver returned -EINVAL to signal that it has
- * been configured without interrupts or they are not available at the
- * moment.
- */
- if (err == -EINVAL)
- err = rtc_dev_update_irq_enable_emul(rtc, enabled);
-#endif
return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index b20d8f26dcdb..a9b355510cd4 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -11,6 +11,7 @@
#include <linux/bcd.h>
#include <linux/of.h>
#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -57,6 +58,24 @@
#define ABEOZ9_SEC_LEN 7
+#define ABEOZ9_REG_ALARM_SEC 0x10
+#define ABEOZ9_BIT_ALARM_SEC GENMASK(6, 0)
+#define ABEOZ9_REG_ALARM_MIN 0x11
+#define ABEOZ9_BIT_ALARM_MIN GENMASK(6, 0)
+#define ABEOZ9_REG_ALARM_HOURS 0x12
+#define ABEOZ9_BIT_ALARM_HOURS_PM BIT(5)
+#define ABEOZ9_BIT_ALARM_HOURS GENMASK(4, 0)
+#define ABEOZ9_REG_ALARM_DAYS 0x13
+#define ABEOZ9_BIT_ALARM_DAYS GENMASK(5, 0)
+#define ABEOZ9_REG_ALARM_WEEKDAYS 0x14
+#define ABEOZ9_BIT_ALARM_WEEKDAYS GENMASK(2, 0)
+#define ABEOZ9_REG_ALARM_MONTHS 0x15
+#define ABEOZ9_BIT_ALARM_MONTHS GENMASK(4, 0)
+#define ABEOZ9_REG_ALARM_YEARS 0x16
+
+#define ABEOZ9_ALARM_LEN 7
+#define ABEOZ9_BIT_ALARM_AE BIT(7)
+
#define ABEOZ9_REG_REG_TEMP 0x20
#define ABEOZ953_TEMP_MAX 120
#define ABEOZ953_TEMP_MIN -60
@@ -186,6 +205,98 @@ static int abeoz9_rtc_set_time(struct device *dev, struct rtc_time *tm)
return abeoz9_reset_validity(regmap);
}
+static int abeoz9_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u8 regs[ABEOZ9_ALARM_LEN];
+ u8 val[2];
+ int ret;
+
+ ret = abeoz9_check_validity(dev);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(regmap, ABEOZ9_REG_CTRL_INT, val, sizeof(val));
+ if (ret)
+ return ret;
+
+ alarm->enabled = val[0] & ABEOZ9_REG_CTRL_INT_AIE;
+ alarm->pending = val[1] & ABEOZ9_REG_CTRL_INT_FLAG_AF;
+
+ ret = regmap_bulk_read(regmap, ABEOZ9_REG_ALARM_SEC, regs, sizeof(regs));
+ if (ret)
+ return ret;
+
+ alarm->time.tm_sec = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_SEC, regs[0]));
+ alarm->time.tm_min = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_MIN, regs[1]));
+ alarm->time.tm_hour = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_HOURS, regs[2]));
+ if (FIELD_GET(ABEOZ9_BIT_ALARM_HOURS_PM, regs[2]))
+ alarm->time.tm_hour += 12;
+
+ alarm->time.tm_mday = bcd2bin(FIELD_GET(ABEOZ9_BIT_ALARM_DAYS, regs[3]));
+
+ return 0;
+}
+
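The decode above combines a GENMASK()/FIELD_GET() field extraction with a BCD conversion. A standalone model of the same two steps, with 8-bit reimplementations of the kernel macros and a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define GENMASK8(h, l)   ((0xffU << (l)) & (0xffU >> (7 - (h))))
#define FIELD_GET8(m, v) (((v) & (m)) >> __builtin_ctz(m))

static unsigned int bcd2bin(uint8_t v)
{
	return (v & 0x0f) + (v >> 4) * 10;
}

int main(void)
{
	uint8_t alarm_min_reg = 0x80 | 0x59; /* AE bit set, BCD "59" */
	uint8_t mask = GENMASK8(6, 0);       /* 0x7f, as for ALARM_MIN */

	printf("%u\n", bcd2bin(FIELD_GET8(mask, alarm_min_reg))); /* 59 */
	return 0;
}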
+static int abeoz9_rtc_alarm_irq_enable(struct device *dev, u32 enable)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+
+ return regmap_update_bits(data->regmap, ABEOZ9_REG_CTRL_INT,
+ ABEOZ9_REG_CTRL_INT_AIE,
+ FIELD_PREP(ABEOZ9_REG_CTRL_INT_AIE, enable));
+}
+
+static int abeoz9_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ABEOZ9_ALARM_LEN] = {0};
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, ABEOZ9_REG_CTRL_INT_FLAG,
+ ABEOZ9_REG_CTRL_INT_FLAG_AF, 0);
+ if (ret)
+ return ret;
+
+ regs[0] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_SEC,
+ bin2bcd(alarm->time.tm_sec));
+ regs[1] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_MIN,
+ bin2bcd(alarm->time.tm_min));
+ regs[2] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_HOURS,
+ bin2bcd(alarm->time.tm_hour));
+ regs[3] = ABEOZ9_BIT_ALARM_AE | FIELD_PREP(ABEOZ9_BIT_ALARM_DAYS,
+ bin2bcd(alarm->time.tm_mday));
+
+ ret = regmap_bulk_write(data->regmap, ABEOZ9_REG_ALARM_SEC, regs,
+ sizeof(regs));
+ if (ret)
+ return ret;
+
+ return abeoz9_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static irqreturn_t abeoz9_rtc_irq(int irq, void *dev)
+{
+ struct abeoz9_rtc_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, ABEOZ9_REG_CTRL_INT_FLAG, &val);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!FIELD_GET(ABEOZ9_REG_CTRL_INT_FLAG_AF, val))
+ return IRQ_NONE;
+
+ regmap_update_bits(data->regmap, ABEOZ9_REG_CTRL_INT_FLAG,
+ ABEOZ9_REG_CTRL_INT_FLAG_AF, 0);
+
+ rtc_update_irq(data->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
static int abeoz9_trickle_parse_dt(struct device_node *node)
{
u32 ohms = 0;
@@ -258,12 +369,16 @@ static int abeoz9_rtc_setup(struct device *dev, struct device_node *node)
static const struct rtc_class_ops rtc_ops = {
.read_time = abeoz9_rtc_get_time,
- .set_time = abeoz9_rtc_set_time,
+ .set_time = abeoz9_rtc_set_time,
+ .read_alarm = abeoz9_rtc_read_alarm,
+ .set_alarm = abeoz9_rtc_set_alarm,
+ .alarm_irq_enable = abeoz9_rtc_alarm_irq_enable,
};
static const struct regmap_config abeoz9_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x3f,
};
#if IS_REACHABLE(CONFIG_HWMON)
@@ -419,6 +534,24 @@ static int abeoz9_probe(struct i2c_client *client,
data->rtc->ops = &rtc_ops;
data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
data->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ data->rtc->uie_unsupported = 1;
+ clear_bit(RTC_FEATURE_ALARM, data->rtc->features);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ abeoz9_rtc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "failed to request alarm irq\n");
+ return ret;
+ }
+ }
+
+ if (client->irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
+ ret = device_init_wakeup(dev, true);
+ set_bit(RTC_FEATURE_ALARM, data->rtc->features);
+ }
ret = devm_rtc_register_device(data->rtc);
if (ret)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cd8e438bc9c4..336cb9aa5e33 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -169,9 +169,6 @@ enum ds_type {
struct ds1307 {
enum ds_type type;
- unsigned long flags;
-#define HAS_NVRAM 0 /* bit 0 == sysfs file active */
-#define HAS_ALARM 1 /* bit 1 == irq claimed */
struct device *dev;
struct regmap *regmap;
const char *name;
@@ -296,7 +293,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
tmp = regs[DS1307_REG_HOUR] & 0x3f;
t->tm_hour = bcd2bin(tmp);
- t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
+ /* rx8130 is bit position, not BCD */
+ if (ds1307->type == rx_8130)
+ t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f) - 1;
+ else
+ t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
tmp = regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
@@ -343,7 +344,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
- regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+ /* rx8130 is bit position, not BCD */
+ if (ds1307->type == rx_8130)
+ regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
+ else
+ regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
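The rx8130 weekday register is one-hot, as the write path's 1 << t->tm_wday shows, so the read path has to map the single set bit back to a 0-based tm_wday (hence the fls() - 1 above). A standalone model of the round trip:

#include <stdio.h>

/* fls(): index of the highest set bit, 1-based; 0 if no bit is set */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	int tm_wday = 3;                  /* Wednesday, 0 = Sunday */
	unsigned int reg = 1U << tm_wday; /* write path: bit 3 -> 0x08 */

	/* read path: highest (only) set bit, converted back to 0-based */
	printf("%d\n", fls32(reg & 0x7f) - 1); /* 3 */
	return 0;
}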
@@ -411,9 +416,6 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
int ret;
u8 regs[9];
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
/* read all ALARM1, ALARM2, and status registers at once */
ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS,
regs, sizeof(regs));
@@ -454,9 +456,6 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 control, status;
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, enabled=%d, pending=%d\n",
"alarm set", t->time.tm_sec, t->time.tm_min,
@@ -512,9 +511,6 @@ static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -ENOTTY;
-
return regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
DS1337_BIT_A1IE,
enabled ? DS1337_BIT_A1IE : 0);
@@ -592,9 +588,6 @@ static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 ald[3], ctl[3];
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
/* Read alarm registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald,
sizeof(ald));
@@ -634,9 +627,6 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 ald[3], ctl[3];
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -681,9 +671,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int ret, reg;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
ret = regmap_read(ds1307->regmap, RX8130_REG_CONTROL0, &reg);
if (ret < 0)
return ret;
@@ -735,9 +722,6 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
u8 regs[10];
int ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
/* Read control and alarm 0 registers. */
ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs,
sizeof(regs));
@@ -793,9 +777,6 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
unsigned char regs[10];
int wday, ret;
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
wday = mcp794xx_alm_weekday(dev, &t->time);
if (wday < 0)
return wday;
@@ -842,9 +823,6 @@ static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -EINVAL;
-
return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL,
MCP794XX_BIT_ALM0_EN,
enabled ? MCP794XX_BIT_ALM0_EN : 0);
@@ -1641,7 +1619,7 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
* Interrupt signal due to alarm conditions and square-wave
* output share same pin, so don't initialize both.
*/
- if (i == DS3231_CLK_SQW && test_bit(HAS_ALARM, &ds1307->flags))
+ if (i == DS3231_CLK_SQW && test_bit(RTC_FEATURE_ALARM, ds1307->rtc->features))
continue;
init.name = ds3231_clks_names[i];
@@ -1964,15 +1942,15 @@ static int ds1307_probe(struct i2c_client *client,
bin2bcd(tmp));
}
- if (want_irq || ds1307_can_wakeup_device) {
- device_set_wakeup_capable(ds1307->dev, true);
- set_bit(HAS_ALARM, &ds1307->flags);
- }
-
ds1307->rtc = devm_rtc_allocate_device(ds1307->dev);
if (IS_ERR(ds1307->rtc))
return PTR_ERR(ds1307->rtc);
+ if (want_irq || ds1307_can_wakeup_device)
+ device_set_wakeup_capable(ds1307->dev, true);
+ else
+ clear_bit(RTC_FEATURE_ALARM, ds1307->rtc->features);
+
if (ds1307_can_wakeup_device && !want_irq) {
dev_info(ds1307->dev,
"'wakeup-source' is set, request for an IRQ is disabled!\n");
@@ -1988,7 +1966,7 @@ static int ds1307_probe(struct i2c_client *client,
if (err) {
client->irq = 0;
device_set_wakeup_capable(ds1307->dev, false);
- clear_bit(HAS_ALARM, &ds1307->flags);
+ clear_bit(RTC_FEATURE_ALARM, ds1307->rtc->features);
dev_err(ds1307->dev, "unable to request IRQ!\n");
} else {
dev_dbg(ds1307->dev, "got IRQ %d\n", client->irq);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index bda884333082..1109cad83838 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -104,12 +104,6 @@ rtc_write(uint8_t val, uint32_t reg)
writeb(val, ds1511_base + (reg * reg_spacing));
}
-static inline void
-rtc_write_alarm(uint8_t val, enum ds1511reg reg)
-{
- rtc_write((val | 0x80), reg);
-}
-
static noinline uint8_t
rtc_read(enum ds1511reg reg)
{
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 57cc09d0a806..c0df49fb978c 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
{ .compatible = "fsl,lx2160a-ftm-alarm", },
{ },
};
+MODULE_DEVICE_TABLE(of, ftm_rtc_match);
static const struct acpi_device_id ftm_imx_acpi_ids[] = {
{"NXP0014",},
diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c
index cc9fbab49999..814d516645e2 100644
--- a/drivers/rtc/rtc-imx-sc.c
+++ b/drivers/rtc/rtc-imx-sc.c
@@ -80,16 +80,6 @@ static int imx_sc_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
return imx_scu_irq_group_enable(SC_IRQ_GROUP_RTC, SC_IRQ_RTC, enable);
}
-static int imx_sc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- /*
- * SCU firmware does NOT provide read alarm API, but .read_alarm
- * callback is required by RTC framework to support alarm function,
- * so just return here.
- */
- return 0;
-}
-
static int imx_sc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct imx_sc_msg_timer_rtc_set_alarm msg;
@@ -127,7 +117,6 @@ static int imx_sc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static const struct rtc_class_ops imx_sc_rtc_ops = {
.read_time = imx_sc_rtc_read_time,
.set_time = imx_sc_rtc_set_time,
- .read_alarm = imx_sc_rtc_read_alarm,
.set_alarm = imx_sc_rtc_set_alarm,
.alarm_irq_enable = imx_sc_rtc_alarm_irq_enable,
};
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index c2692da74e09..c1806f4d68e7 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -840,19 +840,17 @@ static int __exit dryice_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id dryice_dt_ids[] = {
{ .compatible = "fsl,imx25-rtc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dryice_dt_ids);
-#endif
static struct platform_driver dryice_rtc_driver = {
.driver = {
.name = "imxdi_rtc",
- .of_match_table = of_match_ptr(dryice_dt_ids),
+ .of_match_table = dryice_dt_ids,
},
.remove = __exit_p(dryice_rtc_remove),
};
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 1d2e99a70fce..f0f6b9b6daec 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -421,7 +421,7 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
/* Try to get the irq number. We can also work
* in the mode without IRQ.
*/
- m48t59->irq = platform_get_irq(pdev, 0);
+ m48t59->irq = platform_get_irq_optional(pdev, 0);
if (m48t59->irq <= 0)
m48t59->irq = NO_IRQ;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index db57dda7ab97..0f08f22df869 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -415,7 +415,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
- .of_match_table = of_match_ptr(imx_rtc_dt_ids),
+ .of_match_table = imx_rtc_dt_ids,
},
.probe = mxc_rtc_probe,
};
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index dc7db2477f88..d46e0f0cc502 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -786,8 +786,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
/* enable RTC functional clock */
if (rtc->type->has_32kclk_en) {
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
- rtc_writel(rtc, OMAP_RTC_OSC_REG,
- reg | OMAP_RTC_OSC_32KCLK_EN);
+ rtc_write(rtc, OMAP_RTC_OSC_REG, reg | OMAP_RTC_OSC_32KCLK_EN);
}
/* clear old status */
@@ -845,7 +844,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
- rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
+ rtc_write(rtc, OMAP_RTC_OSC_REG, reg);
}
rtc->type->lock(rtc);
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index aef6c1ee8bb0..82becae14229 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -478,6 +478,7 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
{
struct clk *clk;
struct clk_init_data init;
+ struct device_node *node = pcf85063->rtc->dev.parent->of_node;
init.name = "pcf85063-clkout";
init.ops = &pcf85063_clkout_ops;
@@ -487,15 +488,13 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
pcf85063->clkout_hw.init = &init;
/* optional override of the clockname */
- of_property_read_string(pcf85063->rtc->dev.of_node,
- "clock-output-names", &init.name);
+ of_property_read_string(node, "clock-output-names", &init.name);
/* register the clock */
clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
if (!IS_ERR(clk))
- of_clk_add_provider(pcf85063->rtc->dev.of_node,
- of_clk_src_simple_get, clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 5e1e7b2a8c9a..740e2136ca98 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -8,12 +8,15 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
-
-#define DRIVER_NAME "rtc-pcf8523"
+#include <linux/pm_wakeirq.h>
#define REG_CONTROL1 0x00
#define REG_CONTROL1_CAP_SEL BIT(7)
#define REG_CONTROL1_STOP BIT(5)
+#define REG_CONTROL1_AIE BIT(1)
+
+#define REG_CONTROL2 0x01
+#define REG_CONTROL2_AF BIT(3)
#define REG_CONTROL3 0x02
#define REG_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
@@ -32,9 +35,22 @@
#define REG_MONTHS 0x08
#define REG_YEARS 0x09
+#define REG_MINUTE_ALARM 0x0a
+#define REG_HOUR_ALARM 0x0b
+#define REG_DAY_ALARM 0x0c
+#define REG_WEEKDAY_ALARM 0x0d
+#define ALARM_DIS BIT(7)
+
#define REG_OFFSET 0x0e
#define REG_OFFSET_MODE BIT(7)
+#define REG_TMR_CLKOUT_CTRL 0x0f
+
+struct pcf8523 {
+ struct rtc_device *rtc;
+ struct i2c_client *client;
+};
+
static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
{
struct i2c_msg msgs[2];
@@ -140,6 +156,27 @@ static int pcf8523_set_pm(struct i2c_client *client, u8 pm)
return 0;
}
+static irqreturn_t pcf8523_irq(int irq, void *dev_id)
+{
+ struct pcf8523 *pcf8523 = i2c_get_clientdata(dev_id);
+ u8 value;
+ int err;
+
+ err = pcf8523_read(pcf8523->client, REG_CONTROL2, &value);
+ if (err < 0)
+ return IRQ_HANDLED;
+
+ if (value & REG_CONTROL2_AF) {
+ value &= ~REG_CONTROL2_AF;
+ pcf8523_write(pcf8523->client, REG_CONTROL2, value);
+ rtc_update_irq(pcf8523->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int pcf8523_stop_rtc(struct i2c_client *client)
{
u8 value;
@@ -259,11 +296,118 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
return pcf8523_start_rtc(client);
}
+static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 start = REG_MINUTE_ALARM, regs[4];
+ struct i2c_msg msgs[2];
+ u8 value;
+ int err;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &start;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(regs);
+ msgs[1].buf = regs;
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err < 0)
+ return err;
+
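+	/* The alarm has only minute resolution, so seconds always read back as 0 */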
+ tm->time.tm_sec = 0;
+ tm->time.tm_min = bcd2bin(regs[0] & 0x7F);
+ tm->time.tm_hour = bcd2bin(regs[1] & 0x3F);
+ tm->time.tm_mday = bcd2bin(regs[2] & 0x3F);
+ tm->time.tm_wday = bcd2bin(regs[3] & 0x7);
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+ tm->enabled = !!(value & REG_CONTROL1_AIE);
+
+ err = pcf8523_read(client, REG_CONTROL2, &value);
+ if (err < 0)
+ return err;
+ tm->pending = !!(value & REG_CONTROL2_AF);
+
+ return 0;
+}
+
+static int pcf8523_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+	value &= ~REG_CONTROL1_AIE;
+
+ if (enabled)
+ value |= REG_CONTROL1_AIE;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msg;
+ u8 regs[5];
+ int err;
+
+ err = pcf8523_irq_enable(dev, 0);
+ if (err)
+ return err;
+
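+	/* Writing 0 to CONTROL2 clears any pending alarm flag (AF) */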
+ err = pcf8523_write(client, REG_CONTROL2, 0);
+ if (err < 0)
+ return err;
+
+	/* The alarm has no seconds field, so round up to the nearest minute */
+ if (tm->time.tm_sec) {
+ time64_t alarm_time = rtc_tm_to_time64(&tm->time);
+
+ alarm_time += 60 - tm->time.tm_sec;
+ rtc_time64_to_tm(alarm_time, &tm->time);
+ }
+
+ regs[0] = REG_MINUTE_ALARM;
+ regs[1] = bin2bcd(tm->time.tm_min);
+ regs[2] = bin2bcd(tm->time.tm_hour);
+ regs[3] = bin2bcd(tm->time.tm_mday);
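+	/* The weekday alarm is not used, keep it disabled */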
+ regs[4] = ALARM_DIS;
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = sizeof(regs);
+ msg.buf = regs;
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0)
+ return err;
+
+ if (tm->enabled)
+ return pcf8523_irq_enable(dev, tm->enabled);
+
+ return 0;
+}
+
#ifdef CONFIG_RTC_INTF_DEV
static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
struct i2c_client *client = to_i2c_client(dev);
+ unsigned int flags = 0;
+ u8 value;
int ret;
switch (cmd) {
@@ -272,9 +416,16 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
if (ret < 0)
return ret;
if (ret)
- ret = RTC_VL_BACKUP_LOW;
+ flags |= RTC_VL_BACKUP_LOW;
- return put_user(ret, (unsigned int __user *)arg);
+ ret = pcf8523_read(client, REG_SECONDS, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & REG_SECONDS_OS)
+ flags |= RTC_VL_DATA_INVALID;
+
+ return put_user(flags, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
@@ -322,6 +473,9 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
+ .read_alarm = pcf8523_rtc_read_alarm,
+ .set_alarm = pcf8523_rtc_set_alarm,
+ .alarm_irq_enable = pcf8523_irq_enable,
.ioctl = pcf8523_rtc_ioctl,
.read_offset = pcf8523_rtc_read_offset,
.set_offset = pcf8523_rtc_set_offset,
@@ -330,12 +484,21 @@ static const struct rtc_class_ops pcf8523_rtc_ops = {
static int pcf8523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct pcf8523 *pcf8523;
struct rtc_device *rtc;
+ bool wakeup_source = false;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
+	pcf8523 = devm_kzalloc(&client->dev, sizeof(*pcf8523), GFP_KERNEL);
+ if (!pcf8523)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pcf8523);
+ pcf8523->client = client;
+
err = pcf8523_load_capacitance(client);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
@@ -349,9 +512,32 @@ static int pcf8523_probe(struct i2c_client *client,
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ pcf8523->rtc = rtc;
rtc->ops = &pcf8523_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->uie_unsupported = 1;
+
+ if (client->irq > 0) {
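+		/* 0x38: disable CLKOUT (COF = 7) and both countdown timers */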
+ err = pcf8523_write(client, REG_TMR_CLKOUT_CTRL, 0x38);
+ if (err < 0)
+ return err;
+
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf8523_irq,
+ IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(&rtc->dev), client);
+ if (err)
+ return err;
+
+ dev_pm_set_wake_irq(&client->dev, client->irq);
+ }
+
+#ifdef CONFIG_OF
+ wakeup_source = of_property_read_bool(client->dev.of_node, "wakeup-source");
+#endif
+ if (client->irq > 0 || wakeup_source)
+ device_init_wakeup(&client->dev, true);
return devm_rtc_register_device(rtc);
}
@@ -373,7 +559,7 @@ MODULE_DEVICE_TABLE(of, pcf8523_of_match);
static struct i2c_driver pcf8523_driver = {
.driver = {
- .name = DRIVER_NAME,
+ .name = "rtc-pcf8523",
.of_match_table = of_match_ptr(pcf8523_of_match),
},
.probe = pcf8523_probe,
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index eb206597a8fa..29a1c65661e9 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -445,6 +445,16 @@ static const struct pm8xxx_rtc_regs pm8941_regs = {
.alarm_en = BIT(7),
};
+static const struct pm8xxx_rtc_regs pmk8350_regs = {
+ .ctrl = 0x6146,
+ .write = 0x6140,
+ .read = 0x6148,
+ .alarm_rw = 0x6240,
+ .alarm_ctrl = 0x6246,
+ .alarm_ctrl2 = 0x6248,
+ .alarm_en = BIT(7),
+};
+
/*
* Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
*/
@@ -453,6 +463,7 @@ static const struct of_device_id pm8xxx_id_table[] = {
{ .compatible = "qcom,pm8018-rtc", .data = &pm8921_regs },
{ .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
{ .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
+ { .compatible = "qcom,pmk8350-rtc", .data = &pmk8350_regs },
{ },
};
MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 0c48d980d06a..12c807306893 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -320,7 +320,7 @@ static int rv3028_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec = bcd2bin(date[RV3028_SEC] & 0x7f);
tm->tm_min = bcd2bin(date[RV3028_MIN] & 0x7f);
tm->tm_hour = bcd2bin(date[RV3028_HOUR] & 0x3f);
- tm->tm_wday = ilog2(date[RV3028_WDAY] & 0x7f);
+ tm->tm_wday = date[RV3028_WDAY] & 0x7f;
tm->tm_mday = bcd2bin(date[RV3028_DAY] & 0x3f);
tm->tm_mon = bcd2bin(date[RV3028_MONTH] & 0x1f) - 1;
tm->tm_year = bcd2bin(date[RV3028_YEAR]) + 100;
@@ -337,7 +337,7 @@ static int rv3028_set_time(struct device *dev, struct rtc_time *tm)
date[RV3028_SEC] = bin2bcd(tm->tm_sec);
date[RV3028_MIN] = bin2bcd(tm->tm_min);
date[RV3028_HOUR] = bin2bcd(tm->tm_hour);
- date[RV3028_WDAY] = 1 << (tm->tm_wday);
+ date[RV3028_WDAY] = tm->tm_wday;
date[RV3028_DAY] = bin2bcd(tm->tm_mday);
date[RV3028_MONTH] = bin2bcd(tm->tm_mon + 1);
date[RV3028_YEAR] = bin2bcd(tm->tm_year - 100);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 79161d4c6ce4..f4d425002f7f 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -447,6 +447,12 @@ static int rx6110_i2c_probe(struct i2c_client *client,
return rx6110_probe(rx6110, &client->dev);
}
+static const struct acpi_device_id rx6110_i2c_acpi_match[] = {
+ { "SECC6110" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rx6110_i2c_acpi_match);
+
static const struct i2c_device_id rx6110_i2c_id[] = {
{ "rx6110", 0 },
{ }
@@ -456,6 +462,7 @@ MODULE_DEVICE_TABLE(i2c, rx6110_i2c_id);
static struct i2c_driver rx6110_i2c_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
+ .acpi_match_table = rx6110_i2c_acpi_match,
},
.probe = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 80b66f16db89..038269a6b08c 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -713,16 +713,10 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
static int s5m_rtc_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
- struct sec_platform_data *pdata = s5m87xx->pdata;
struct s5m_rtc_info *info;
const struct regmap_config *regmap_cfg;
int ret, alarm_irq;
- if (!pdata) {
- dev_err(pdev->dev.parent, "Platform data not supplied\n");
- return -ENODEV;
- }
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 833daeb7b60e..ee721e53c155 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -153,12 +153,12 @@ static void rtc_wait_not_busy(struct spear_rtc_config *config)
static irqreturn_t spear_rtc_irq(int irq, void *dev_id)
{
struct spear_rtc_config *config = dev_id;
- unsigned long flags, events = 0;
+ unsigned long events = 0;
unsigned int irq_data;
- spin_lock_irqsave(&config->lock, flags);
+ spin_lock(&config->lock);
irq_data = readl(config->ioaddr + STATUS_REG);
- spin_unlock_irqrestore(&config->lock, flags);
+ spin_unlock(&config->lock);
if ((irq_data & RTC_INT_MASK)) {
spear_rtc_clear_interrupt(config);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 288abb1abdb8..bc89c62ccb9b 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/math64.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index 8a957d31a1a4..74026f67fdfb 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -273,7 +273,7 @@ static bool rtc_does_wakealarm(struct rtc_device *rtc)
if (!device_can_wakeup(rtc->dev.parent))
return false;
- return rtc->ops->set_alarm != NULL;
+ return !!test_bit(RTC_FEATURE_ALARM, rtc->features);
}
static umode_t rtc_attr_is_visible(struct kobject *kobj,
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index dc36531d589e..222593bc2afe 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1649,8 +1649,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
}
/* Get the read only section offset */
- ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
- PCI_VPD_LRDT_RO_DATA);
+ ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (unlikely(ro_start < 0)) {
dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
rc = -ENODEV;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 4274bd1b0f99..96f74a1dc603 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -46,9 +46,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
#ifdef CONFIG_FSL_PAMU
struct device *dev = pcfg->dev;
- int window_count = 1;
- struct iommu_domain_geometry geom_attr;
- struct pamu_stash_attribute stash_attr;
int ret;
pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
@@ -56,38 +53,9 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
goto no_iommu;
}
- geom_attr.aperture_start = 0;
- geom_attr.aperture_end =
- ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
- geom_attr.force_aperture = true;
- ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
- &geom_attr);
+ ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
- ret);
- goto out_domain_free;
- }
- ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
- &window_count);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
- ret);
- goto out_domain_free;
- }
- stash_attr.cpu = cpu;
- stash_attr.cache = PAMU_ATTR_CACHE_L1;
- ret = iommu_domain_set_attr(pcfg->iommu_domain,
- DOMAIN_ATTR_FSL_PAMU_STASH,
- &stash_attr);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
- __func__, ret);
- goto out_domain_free;
- }
- ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
- IOMMU_READ | IOMMU_WRITE);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
__func__, ret);
goto out_domain_free;
}
@@ -97,14 +65,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
ret);
goto out_domain_free;
}
- ret = iommu_domain_set_attr(pcfg->iommu_domain,
- DOMAIN_ATTR_FSL_PAMU_ENABLE,
- &window_count);
- if (ret < 0) {
- dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
- ret);
- goto out_detach_device;
- }
no_iommu:
#endif
@@ -113,8 +73,6 @@ no_iommu:
return;
#ifdef CONFIG_FSL_PAMU
-out_detach_device:
- iommu_detach_device(pcfg->iommu_domain, NULL);
out_domain_free:
iommu_domain_free(pcfg->iommu_domain);
pcfg->iommu_domain = NULL;
@@ -169,15 +127,8 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
unsigned int cpu)
{
#ifdef CONFIG_FSL_PAMU /* TODO */
- struct pamu_stash_attribute stash_attr;
- int ret;
-
if (pcfg->iommu_domain) {
- stash_attr.cpu = cpu;
- stash_attr.cache = PAMU_ATTR_CACHE_L1;
- ret = iommu_domain_set_attr(pcfg->iommu_domain,
- DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
- if (ret < 0) {
+ if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
dev_err(pcfg->dev,
"Failed to update pamu stash setting\n");
return;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 6bd22359d411..8e3b78bb2ac2 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1904,7 +1904,7 @@ static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
arg = ret;
break;
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
ret = tegra_io_pad_is_powered(pmc, pad->id);
if (ret < 0)
return ret;
@@ -1941,7 +1941,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
arg = pinconf_to_config_argument(configs[i]);
switch (param) {
- case PIN_CONFIG_LOW_POWER_MODE:
+ case PIN_CONFIG_MODE_LOW_POWER:
if (arg)
err = tegra_io_pad_power_disable(pad->id);
else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 64e8f0828e85..39dc02e366f4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1553,13 +1553,12 @@ static int spi_imx_slave_abort(struct spi_master *master)
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(spi_imx_dt_ids, &pdev->dev);
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
int ret, irq, spi_drctl;
- const struct spi_imx_devtype_data *devtype_data = of_id->data;
+ const struct spi_imx_devtype_data *devtype_data =
+ of_device_get_match_data(&pdev->dev);
bool slave_mode;
u32 val;
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 7a09061cda57..df5ca3596470 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -1131,8 +1131,8 @@ static void tegra_channel_host1x_syncpts_free(struct tegra_vi_channel *chan)
int i;
for (i = 0; i < chan->numgangports; i++) {
- host1x_syncpt_free(chan->mw_ack_sp[i]);
- host1x_syncpt_free(chan->frame_start_sp[i]);
+ host1x_syncpt_put(chan->mw_ack_sp[i]);
+ host1x_syncpt_put(chan->frame_start_sp[i]);
}
}
@@ -1177,7 +1177,7 @@ static int tegra_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
mw_sp = host1x_syncpt_request(&vi->client, flags);
if (!mw_sp) {
dev_err(vi->dev, "failed to request memory ack syncpoint\n");
- host1x_syncpt_free(fs_sp);
+ host1x_syncpt_put(fs_sp);
ret = -ENOMEM;
goto free_syncpts;
}
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index dffe3ba8c7c4..e61b91d14ad1 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -254,10 +254,8 @@ static int amlogic_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base)) {
- dev_err(dev, "failed to get io address\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
pdata->regmap = devm_regmap_init_mmio(dev, base,
pdata->data->regmap_config);
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 3199977f1e73..c8e4344d5a3d 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -184,7 +184,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
data->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
- dev_err(&pdev->dev, "Could not get registers: %d\n", err);
return err;
}
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 10af3341e5ea..eeb4e4b76c0b 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -13,10 +13,10 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
+#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -50,8 +50,6 @@ struct time_in_idle {
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
- * @id: unique integer value corresponding to each cpufreq_cooling_device
- * registered.
* @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
@@ -61,7 +59,6 @@ struct time_in_idle {
* @cdev: thermal_cooling_device pointer to keep track of the
* registered cooling device.
* @policy: cpufreq policy.
- * @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
 * @qos_req:		PM QoS constraint to apply
*
@@ -69,23 +66,17 @@ struct time_in_idle {
* cpufreq_cooling_device.
*/
struct cpufreq_cooling_device {
- int id;
u32 last_load;
unsigned int cpufreq_state;
unsigned int max_level;
struct em_perf_domain *em;
struct cpufreq_policy *policy;
- struct list_head node;
#ifndef CONFIG_SMP
struct time_in_idle *idle_time;
#endif
struct freq_qos_request qos_req;
};
-static DEFINE_IDA(cpufreq_ida);
-static DEFINE_MUTEX(cooling_list_lock);
-static LIST_HEAD(cpufreq_cdev_list);
-
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
* get_level: Find the level for a particular frequency
@@ -125,7 +116,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
{
int i;
- for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+ for (i = cpufreq_cdev->max_level; i > 0; i--) {
if (power >= cpufreq_cdev->em->table[i].power)
break;
}
@@ -528,11 +519,11 @@ __cpufreq_cooling_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
- char dev_name[THERMAL_NAME_LENGTH];
unsigned int i;
struct device *dev;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
+ char *name;
dev = get_cpu_device(policy->cpu);
if (unlikely(!dev)) {
@@ -567,16 +558,6 @@ __cpufreq_cooling_register(struct device_node *np,
/* max_level is an index, not a counter */
cpufreq_cdev->max_level = i - 1;
- ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
- if (ret < 0) {
- cdev = ERR_PTR(ret);
- goto free_idle_time;
- }
- cpufreq_cdev->id = ret;
-
- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
- cpufreq_cdev->id);
-
cooling_ops = &cpufreq_cooling_ops;
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
@@ -591,7 +572,7 @@ __cpufreq_cooling_register(struct device_node *np,
pr_err("%s: unsorted frequency tables are not supported\n",
__func__);
cdev = ERR_PTR(-EINVAL);
- goto remove_ida;
+ goto free_idle_time;
}
ret = freq_qos_add_request(&policy->constraints,
@@ -601,24 +582,25 @@ __cpufreq_cooling_register(struct device_node *np,
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
cdev = ERR_PTR(ret);
- goto remove_ida;
+ goto free_idle_time;
}
- cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
+ cdev = ERR_PTR(-ENOMEM);
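+	/* Name the cooling device after its policy CPU, e.g. "cpufreq-cpu0",
+	 * rather than after a driver-global index.
+	 */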
+ name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev));
+ if (!name)
+ goto remove_qos_req;
+
+ cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev,
cooling_ops);
+ kfree(name);
+
if (IS_ERR(cdev))
goto remove_qos_req;
- mutex_lock(&cooling_list_lock);
- list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
- mutex_unlock(&cooling_list_lock);
-
return cdev;
remove_qos_req:
freq_qos_remove_request(&cpufreq_cdev->qos_req);
-remove_ida:
- ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time:
free_idle_time(cpufreq_cdev);
free_cdev:
@@ -706,13 +688,8 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_cdev = cdev->devdata;
- mutex_lock(&cooling_list_lock);
- list_del(&cpufreq_cdev->node);
- mutex_unlock(&cooling_list_lock);
-
thermal_cooling_device_unregister(cdev);
freq_qos_remove_request(&cpufreq_cdev->qos_req);
- ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time(cpufreq_cdev);
kfree(cpufreq_cdev);
}
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
index 7ecab4b16b29..4f41102e8b16 100644
--- a/drivers/thermal/cpuidle_cooling.c
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -9,9 +9,9 @@
#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/idle_inject.h>
-#include <linux/idr.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>
@@ -26,8 +26,6 @@ struct cpuidle_cooling_device {
unsigned long state;
};
-static DEFINE_IDA(cpuidle_ida);
-
/**
* cpuidle_cooling_runtime - Running time computation
* @idle_duration_us: CPU idle time to inject in microseconds
@@ -174,10 +172,11 @@ static int __cpuidle_cooling_register(struct device_node *np,
struct idle_inject_device *ii_dev;
struct cpuidle_cooling_device *idle_cdev;
struct thermal_cooling_device *cdev;
+ struct device *dev;
unsigned int idle_duration_us = TICK_USEC;
unsigned int latency_us = UINT_MAX;
- char dev_name[THERMAL_NAME_LENGTH];
- int id, ret;
+ char *name;
+ int ret;
idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL);
if (!idle_cdev) {
@@ -185,16 +184,10 @@ static int __cpuidle_cooling_register(struct device_node *np,
goto out;
}
- id = ida_simple_get(&cpuidle_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto out_kfree;
- }
-
ii_dev = idle_inject_register(drv->cpumask);
if (!ii_dev) {
ret = -EINVAL;
- goto out_id;
+ goto out_kfree;
}
of_property_read_u32(np, "duration-us", &idle_duration_us);
@@ -205,24 +198,32 @@ static int __cpuidle_cooling_register(struct device_node *np,
idle_cdev->ii_dev = ii_dev;
- snprintf(dev_name, sizeof(dev_name), "thermal-idle-%d", id);
+ dev = get_cpu_device(cpumask_first(drv->cpumask));
- cdev = thermal_of_cooling_device_register(np, dev_name, idle_cdev,
+ name = kasprintf(GFP_KERNEL, "idle-%s", dev_name(dev));
+ if (!name) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ cdev = thermal_of_cooling_device_register(np, name, idle_cdev,
&cpuidle_cooling_ops);
if (IS_ERR(cdev)) {
ret = PTR_ERR(cdev);
- goto out_unregister;
+ goto out_kfree_name;
}
pr_debug("%s: Idle injection set with idle duration=%u, latency=%u\n",
- dev_name, idle_duration_us, latency_us);
+ name, idle_duration_us, latency_us);
+
+ kfree(name);
return 0;
+out_kfree_name:
+ kfree(name);
out_unregister:
idle_inject_unregister(ii_dev);
-out_id:
- ida_simple_remove(&cpuidle_ida, id);
out_kfree:
kfree(idle_cdev);
out:
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index fed3121ff2a1..3a788ac4f525 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -14,7 +14,6 @@
#include <linux/devfreq_cooling.h>
#include <linux/energy_model.h>
#include <linux/export.h>
-#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
@@ -25,11 +24,8 @@
#define HZ_PER_KHZ 1000
#define SCALE_ERROR_MITIGATION 100
-static DEFINE_IDA(devfreq_ida);
-
/**
* struct devfreq_cooling_device - Devfreq cooling device
- * @id: unique integer value corresponding to each
* devfreq_cooling_device registered.
* @cdev: Pointer to associated thermal cooling device.
* @devfreq: Pointer to associated devfreq device.
@@ -51,7 +47,6 @@ static DEFINE_IDA(devfreq_ida);
* @em_pd: Energy Model for the associated Devfreq device
*/
struct devfreq_cooling_device {
- int id;
struct thermal_cooling_device *cdev;
struct devfreq *devfreq;
unsigned long cooling_state;
@@ -363,7 +358,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct thermal_cooling_device *cdev;
struct device *dev = df->dev.parent;
struct devfreq_cooling_device *dfc;
- char dev_name[THERMAL_NAME_LENGTH];
+ char *name;
int err, num_opps;
dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
@@ -407,30 +402,27 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
if (err < 0)
goto free_table;
- err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL);
- if (err < 0)
+ err = -ENOMEM;
+ name = kasprintf(GFP_KERNEL, "devfreq-%s", dev_name(dev));
+ if (!name)
goto remove_qos_req;
- dfc->id = err;
-
- snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);
-
- cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
+ cdev = thermal_of_cooling_device_register(np, name, dfc,
&devfreq_cooling_ops);
+ kfree(name);
+
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev,
"Failed to register devfreq cooling device (%d)\n",
err);
- goto release_ida;
+ goto remove_qos_req;
}
dfc->cdev = cdev;
return cdev;
-release_ida:
- ida_simple_remove(&devfreq_ida, dfc->id);
remove_qos_req:
dev_pm_qos_remove_request(&dfc->req_max_freq);
free_table:
@@ -527,7 +519,6 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
dev = dfc->devfreq->dev.parent;
thermal_cooling_device_unregister(dfc->cdev);
- ida_simple_remove(&devfreq_ida, dfc->id);
dev_pm_qos_remove_request(&dfc->req_max_freq);
em_dev_unregister_perf_domain(dev);
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index aaa07180ab48..1e5abf4822be 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
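+	/* tz->lock protects the thermal_instances list walked below */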
+ mutex_lock(&tz->lock);
+
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -105,11 +107,12 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
instance->target = get_target_state(tz, cdev, percentage,
cur_trip_level);
- mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false;
- mutex_unlock(&instance->cdev->lock);
- thermal_cdev_update(cdev);
+ mutex_lock(&cdev->lock);
+ __thermal_cdev_update(cdev);
+ mutex_unlock(&cdev->lock);
}
+
+ mutex_unlock(&tz->lock);
return 0;
}
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 92acae53df49..13e375751d22 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -301,9 +301,8 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
instance->target = clamp_val(state, instance->lower, instance->upper);
mutex_lock(&cdev->lock);
- cdev->updated = false;
+ __thermal_cdev_update(cdev);
mutex_unlock(&cdev->lock);
- thermal_cdev_update(cdev);
return 0;
}
@@ -374,9 +373,11 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
*/
extra_power = min(extra_power, capped_extra_power);
if (capped_extra_power > 0)
- for (i = 0; i < num_actors; i++)
- granted_power[i] += (extra_actor_power[i] *
- extra_power) / capped_extra_power;
+ for (i = 0; i < num_actors; i++) {
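+			/* Compute in 64 bits: the u32 * u32 product can overflow */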
+ u64 extra_range = (u64)extra_actor_power[i] * extra_power;
+ granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range,
+ capped_extra_power);
+ }
}
static int allocate_power(struct thermal_zone_device *tz,
@@ -569,22 +570,33 @@ static void reset_pid_controller(struct power_allocator_params *params)
params->prev_err = 0;
}
-static void allow_maximum_power(struct thermal_zone_device *tz)
+static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
{
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ u32 req_power;
mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+
if ((instance->trip != params->trip_max_desired_temperature) ||
(!cdev_is_power_actor(instance->cdev)))
continue;
instance->target = 0;
mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false;
+ /*
+	 * Update the cooling device's local stats here so that they do not
+	 * go unmaintained for dozens of seconds at a time.
+ */
+ cdev->ops->get_requested_power(cdev, &req_power);
+
+ if (update)
+ __thermal_cdev_update(instance->cdev);
+
mutex_unlock(&instance->cdev->lock);
- thermal_cdev_update(instance->cdev);
}
mutex_unlock(&tz->lock);
}
@@ -698,6 +710,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
int ret;
int switch_on_temp, control_temp;
struct power_allocator_params *params = tz->governor_data;
+ bool update;
/*
* We get called for every trip point but we only need to do
@@ -709,9 +722,10 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
&switch_on_temp);
if (!ret && (tz->temperature < switch_on_temp)) {
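+		/* Only force a cooling device update on the transition from
+		 * above to below the switch-on temperature.
+		 */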
+ update = (tz->last_temperature >= switch_on_temp);
tz->passive = 0;
reset_pid_controller(params);
- allow_maximum_power(tz);
+ allow_maximum_power(tz, update);
return 0;
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index ee05950afd2f..9a21ac0ceb11 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -1,7 +1,7 @@
/*
- * Hisilicon thermal sensor driver
+ * HiSilicon thermal sensor driver
*
- * Copyright (c) 2014-2015 Hisilicon Limited.
+ * Copyright (c) 2014-2015 HiSilicon Limited.
* Copyright (c) 2014-2015 Linaro Limited.
*
* Xinwei Kong <kong.kongxinwei@hisilicon.com>
@@ -572,10 +572,8 @@ static int hisi_thermal_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
+ if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
- }
ret = data->ops->probe(data);
if (ret)
@@ -672,5 +670,5 @@ module_platform_driver(hisi_thermal_driver);
MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
-MODULE_DESCRIPTION("Hisilicon thermal driver");
+MODULE_DESCRIPTION("HiSilicon thermal driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index ce4f59213c7a..e4299ca3423c 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -79,3 +79,14 @@ config INTEL_PCH_THERMAL
Enable this to support thermal reporting on certain intel PCHs.
Thermal reporting device will provide temperature reading,
programmable trip points and other information.
+
+config INTEL_TCC_COOLING
+	tristate "Intel TCC offset cooling driver"
+ depends on X86
+ help
+ Enable this to support system cooling by adjusting the effective TCC
+ activation temperature via the TCC Offset register, which is widely
+ supported on modern Intel platforms.
+	Note that how quickly the setting takes effect, and by how much the
+	CPU frequency is reduced, may vary from one platform to another.
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index ff2ad30ef397..5ff2afa388f7 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
+obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
new file mode 100644
index 000000000000..8ec10d55d421
--- /dev/null
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cooling device driver that activates processor throttling by
+ * programming the TCC Offset register.
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <asm/cpu_device_id.h>
+
+#define TCC_SHIFT 24
+#define TCC_MASK (0x3fULL << TCC_SHIFT)
+#define TCC_PROGRAMMABLE BIT(30)
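+
+/*
+ * The TCC activation offset is stored in bits 29:24 of
+ * MSR_IA32_TEMPERATURE_TARGET; bit 30 of MSR_PLATFORM_INFO indicates
+ * whether that offset is programmable at all.
+ */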
+
+static struct thermal_cooling_device *tcc_cdev;
+
+static int tcc_get_max_state(struct thermal_cooling_device *cdev,
+			     unsigned long *state)
+{
+ *state = TCC_MASK >> TCC_SHIFT;
+ return 0;
+}
+
+static int tcc_offset_update(int tcc)
+{
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ val &= ~TCC_MASK;
+ val |= tcc << TCC_SHIFT;
+
+ err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tcc_get_cur_state(struct thermal_cooling_device *cdev,
+			     unsigned long *state)
+{
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ *state = (val & TCC_MASK) >> TCC_SHIFT;
+ return 0;
+}
+
+static int tcc_set_cur_state(struct thermal_cooling_device *cdev,
+			     unsigned long state)
+{
+ return tcc_offset_update(state);
+}
+
+static const struct thermal_cooling_device_ops tcc_cooling_ops = {
+ .get_max_state = tcc_get_max_state,
+ .get_cur_state = tcc_get_cur_state,
+ .set_cur_state = tcc_set_cur_state,
+};
+
+static const struct x86_cpu_id tcc_ids[] __initconst = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, tcc_ids);
+
+static int __init tcc_cooling_init(void)
+{
+	u64 val;
+	const struct x86_cpu_id *id;
+	int err;
+
+ id = x86_match_cpu(tcc_ids);
+ if (!id)
+ return -ENODEV;
+
+ err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ if (err)
+ return err;
+
+ if (!(val & TCC_PROGRAMMABLE))
+ return -ENODEV;
+
+ pr_info("Programmable TCC Offset detected\n");
+
+	tcc_cdev = thermal_cooling_device_register("TCC Offset", NULL,
+						   &tcc_cooling_ops);
+	if (IS_ERR(tcc_cdev))
+		return PTR_ERR(tcc_cdev);
+ return 0;
+}
+
+module_init(tcc_cooling_init)
+
+static void __exit tcc_cooling_exit(void)
+{
+ thermal_cooling_device_unregister(tcc_cdev);
+}
+
+module_exit(tcc_cooling_exit)
+
+MODULE_DESCRIPTION("TCC offset cooling device driver");
+MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 149c6d7fd5a0..97e8678ccf0e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -573,12 +573,12 @@ static int raw_to_mcelsius_v1(struct mtk_thermal *mt, int sensno, s32 raw)
static int raw_to_mcelsius_v2(struct mtk_thermal *mt, int sensno, s32 raw)
{
- s32 format_1 = 0;
- s32 format_2 = 0;
- s32 g_oe = 1;
- s32 g_gain = 1;
- s32 g_x_roomt = 0;
- s32 tmp = 0;
+ s32 format_1;
+ s32 format_2;
+ s32 g_oe;
+ s32 g_gain;
+ s32 g_x_roomt;
+ s32 tmp;
if (raw == 0)
return 0;
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 6dc879fea9c8..7419e196dbb0 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -17,6 +17,7 @@
#include "../thermal_core.h"
+#define QPNP_TM_REG_DIG_MAJOR 0x01
#define QPNP_TM_REG_TYPE 0x04
#define QPNP_TM_REG_SUBTYPE 0x05
#define QPNP_TM_REG_STATUS 0x08
@@ -38,26 +39,30 @@
#define ALARM_CTRL_FORCE_ENABLE BIT(7)
-/*
- * Trip point values based on threshold control
- * 0 = {105 C, 125 C, 145 C}
- * 1 = {110 C, 130 C, 150 C}
- * 2 = {115 C, 135 C, 155 C}
- * 3 = {120 C, 140 C, 160 C}
-*/
-#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
-#define TEMP_STAGE_HYSTERESIS 2000
+#define THRESH_COUNT 4
+#define STAGE_COUNT 3
+
+/* Over-temperature trip point values in mC */
+static const long temp_map_gen1[THRESH_COUNT][STAGE_COUNT] = {
+ { 105000, 125000, 145000 },
+ { 110000, 130000, 150000 },
+ { 115000, 135000, 155000 },
+ { 120000, 140000, 160000 },
+};
+
+static const long temp_map_gen2_v1[THRESH_COUNT][STAGE_COUNT] = {
+ { 90000, 110000, 140000 },
+ { 95000, 115000, 145000 },
+ { 100000, 120000, 150000 },
+ { 105000, 125000, 155000 },
+};
-#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
-#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
+#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
#define THRESH_MIN 0
#define THRESH_MAX 3
-/* Stage 2 Threshold Min: 125 C */
-#define STAGE2_THRESHOLD_MIN 125000
-/* Stage 2 Threshold Max: 140 C */
-#define STAGE2_THRESHOLD_MAX 140000
+#define TEMP_STAGE_HYSTERESIS 2000
/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP 37000
@@ -77,6 +82,7 @@ struct qpnp_tm_chip {
bool initialized;
struct iio_channel *adc;
+ const long (*temp_map)[THRESH_COUNT][STAGE_COUNT];
};
/* This array maps from GEN2 alarm state to GEN1 alarm stage */
@@ -101,6 +107,23 @@ static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
}
/**
+ * qpnp_tm_decode_temp() - return temperature in mC corresponding to the
+ * specified over-temperature stage
+ * @chip: Pointer to the qpnp_tm chip
+ * @stage: Over-temperature stage
+ *
+ * Return: temperature in mC
+ */
+static long qpnp_tm_decode_temp(struct qpnp_tm_chip *chip, unsigned int stage)
+{
+ if (!chip->temp_map || chip->thresh >= THRESH_COUNT || stage == 0 ||
+ stage > STAGE_COUNT)
+ return 0;
+
+ return (*chip->temp_map)[chip->thresh][stage - 1];
+}
+
+/**
* qpnp_tm_get_temp_stage() - return over-temperature stage
* @chip: Pointer to the qpnp_tm chip
*
@@ -149,14 +172,12 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
if (stage_new > stage_old) {
/* increasing stage, use lower bound */
- chip->temp = (stage_new - 1) * TEMP_STAGE_STEP +
- chip->thresh * TEMP_THRESH_STEP +
- TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ chip->temp = qpnp_tm_decode_temp(chip, stage_new)
+ + TEMP_STAGE_HYSTERESIS;
} else if (stage_new < stage_old) {
/* decreasing stage, use upper bound */
- chip->temp = stage_new * TEMP_STAGE_STEP +
- chip->thresh * TEMP_THRESH_STEP -
- TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ chip->temp = qpnp_tm_decode_temp(chip, stage_new + 1)
+ - TEMP_STAGE_HYSTERESIS;
}
chip->stage = stage;
@@ -199,26 +220,28 @@ static int qpnp_tm_get_temp(void *data, int *temp)
static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
int temp)
{
- u8 reg;
+ long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1];
+ long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1];
bool disable_s2_shutdown = false;
+ u8 reg;
WARN_ON(!mutex_is_locked(&chip->lock));
/*
* Default: S2 and S3 shutdown enabled, thresholds at
- * 105C/125C/145C, monitoring at 25Hz
+	 * the lowest threshold set, monitoring at 25Hz
*/
reg = SHUTDOWN_CTRL1_RATE_25HZ;
if (temp == THERMAL_TEMP_INVALID ||
- temp < STAGE2_THRESHOLD_MIN) {
+ temp < stage2_threshold_min) {
chip->thresh = THRESH_MIN;
goto skip;
}
- if (temp <= STAGE2_THRESHOLD_MAX) {
+ if (temp <= stage2_threshold_max) {
chip->thresh = THRESH_MAX -
- ((STAGE2_THRESHOLD_MAX - temp) /
+ ((stage2_threshold_max - temp) /
TEMP_THRESH_STEP);
disable_s2_shutdown = true;
} else {
@@ -326,9 +349,7 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
? chip->stage : alarm_state_map[chip->stage];
if (stage)
- chip->temp = chip->thresh * TEMP_THRESH_STEP +
- (stage - 1) * TEMP_STAGE_STEP +
- TEMP_THRESH_MIN;
+ chip->temp = qpnp_tm_decode_temp(chip, stage);
crit_temp = qpnp_tm_get_critical_trip_temp(chip);
ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
@@ -350,7 +371,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
{
struct qpnp_tm_chip *chip;
struct device_node *node;
- u8 type, subtype;
+ u8 type, subtype, dig_major;
u32 res;
int ret, irq;
@@ -400,6 +421,12 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return ret;
}
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MAJOR, &dig_major);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not read dig_major\n");
+ return ret;
+ }
+
if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
&& subtype != QPNP_TM_SUBTYPE_GEN2)) {
dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
@@ -408,6 +435,10 @@ static int qpnp_tm_probe(struct platform_device *pdev)
}
chip->subtype = subtype;
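+	/* GEN2 revision 1 and later parts use the lower gen2_v1 threshold map */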
+ if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 1)
+ chip->temp_map = &temp_map_gen2_v1;
+ else
+ chip->temp_map = &temp_map_gen1;
/*
* Register the sensor before initializing the hardware to be able to
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 2a28a5af209e..67c1748cdf73 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -10,8 +10,6 @@
#include <linux/thermal.h>
#include "tsens.h"
-#define CAL_MDEGC 30000
-
#define CONFIG_ADDR 0x3640
#define CONFIG_ADDR_8660 0x3620
/* CONFIG_ADDR bitmasks */
@@ -21,40 +19,38 @@
#define CONFIG_SHIFT_8660 28
#define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660)
-#define STATUS_CNTL_ADDR_8064 0x3660
#define CNTL_ADDR 0x3620
/* CNTL_ADDR bitmasks */
#define EN BIT(0)
#define SW_RST BIT(1)
-#define SENSOR0_EN BIT(3)
+
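+/* Measurement period field of CNTL_ADDR: value 1, pre-shifted to bit 18 */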
+#define MEASURE_PERIOD BIT(18)
#define SLP_CLK_ENA BIT(26)
#define SLP_CLK_ENA_8660 BIT(24)
-#define MEASURE_PERIOD 1
#define SENSOR0_SHIFT 3
-/* INT_STATUS_ADDR bitmasks */
-#define MIN_STATUS_MASK BIT(0)
-#define LOWER_STATUS_CLR BIT(1)
-#define UPPER_STATUS_CLR BIT(2)
-#define MAX_STATUS_MASK BIT(3)
-
#define THRESHOLD_ADDR 0x3624
-/* THRESHOLD_ADDR bitmasks */
-#define THRESHOLD_MAX_LIMIT_SHIFT 24
-#define THRESHOLD_MIN_LIMIT_SHIFT 16
-#define THRESHOLD_UPPER_LIMIT_SHIFT 8
-#define THRESHOLD_LOWER_LIMIT_SHIFT 0
-
-/* Initial temperature threshold values */
-#define LOWER_LIMIT_TH 0x50
-#define UPPER_LIMIT_TH 0xdf
-#define MIN_LIMIT_TH 0x0
-#define MAX_LIMIT_TH 0xff
-
-#define S0_STATUS_ADDR 0x3628
+
#define INT_STATUS_ADDR 0x363c
-#define TRDY_MASK BIT(7)
-#define TIMEOUT_US 100
+
+#define S0_STATUS_OFF 0x3628
+#define S1_STATUS_OFF 0x362c
+#define S2_STATUS_OFF 0x3630
+#define S3_STATUS_OFF 0x3634
+#define S4_STATUS_OFF 0x3638
+#define S5_STATUS_OFF 0x3664 /* Sensors 5-10 found on apq8064/msm8960 */
+#define S6_STATUS_OFF 0x3668
+#define S7_STATUS_OFF 0x366c
+#define S8_STATUS_OFF 0x3670
+#define S9_STATUS_OFF 0x3674
+#define S10_STATUS_OFF 0x3678
+
+/* Original slope values minus 350, to compensate for mC to C inaccuracy */
+static u32 tsens_msm8960_slope[] = {
+	826, 826, 804, 826,
+	761, 782, 782, 849,
+	782, 849, 782
+};
static int suspend_8960(struct tsens_priv *priv)
{
@@ -115,17 +111,34 @@ static int resume_8960(struct tsens_priv *priv)
static int enable_8960(struct tsens_priv *priv, int id)
{
int ret;
- u32 reg, mask;
+ u32 reg, mask = BIT(id);
ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
- mask = BIT(id + SENSOR0_SHIFT);
+	/* HARDWARE BUG:
+	 * On platforms with more than 6 sensors, all remaining sensors
+	 * must be enabled together, otherwise undefined results are expected
+	 * (e.g. sensors 6-7 disabled, sensor 3 disabled, ...). In the
+	 * original driver all the sensors are enabled in one step, hence
+	 * this bug is never triggered.
+	 */
+ if (id > 5)
+ mask = GENMASK(10, 6);
+
+ mask <<= SENSOR0_SHIFT;
+
+ /* Sensors already enabled. Skip. */
+ if ((reg & mask) == mask)
+ return 0;
+
ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
+ reg |= MEASURE_PERIOD;
+
if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else
@@ -162,63 +175,11 @@ static void disable_8960(struct tsens_priv *priv)
regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
}
-static int init_8960(struct tsens_priv *priv)
-{
- int ret, i;
- u32 reg_cntl;
-
- priv->tm_map = dev_get_regmap(priv->dev, NULL);
- if (!priv->tm_map)
- return -ENODEV;
-
- /*
- * The status registers for each sensor are discontiguous
- * because some SoCs have 5 sensors while others have more
- * but the control registers stay in the same place, i.e
- * directly after the first 5 status registers.
- */
- for (i = 0; i < priv->num_sensors; i++) {
- if (i >= 5)
- priv->sensor[i].status = S0_STATUS_ADDR + 40;
- priv->sensor[i].status += i * 4;
- }
-
- reg_cntl = SW_RST;
- ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
- if (ret)
- return ret;
-
- if (priv->num_sensors > 1) {
- reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
- reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR,
- CONFIG_MASK, CONFIG);
- } else {
- reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
- reg_cntl &= ~CONFIG_MASK_8660;
- reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
- }
-
- reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
- if (ret)
- return ret;
-
- reg_cntl |= EN;
- ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int calibrate_8960(struct tsens_priv *priv)
{
int i;
char *data;
-
- ssize_t num_read = priv->num_sensors;
- struct tsens_sensor *s = priv->sensor;
+ u32 p1[11];
data = qfprom_read(priv->dev, "calib");
if (IS_ERR(data))
@@ -226,60 +187,96 @@ static int calibrate_8960(struct tsens_priv *priv)
if (IS_ERR(data))
return PTR_ERR(data);
- for (i = 0; i < num_read; i++, s++)
- s->offset = data[i];
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = data[i];
+ priv->sensor[i].slope = tsens_msm8960_slope[i];
+ }
+
+ compute_intercept_slope(priv, p1, NULL, ONE_PT_CALIB);
kfree(data);
return 0;
}
-/* Temperature on y axis and ADC-code on x-axis */
-static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
-{
- int slope, offset;
-
- slope = thermal_zone_get_slope(s->tzd);
- offset = CAL_MDEGC - slope * s->offset;
-
- return adc_code * slope + offset;
-}
-
-static int get_temp_8960(const struct tsens_sensor *s, int *temp)
-{
- int ret;
- u32 code, trdy;
- struct tsens_priv *priv = s->priv;
- unsigned long timeout;
-
- timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
- do {
- ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, &trdy);
- if (ret)
- return ret;
- if (!(trdy & TRDY_MASK))
- continue;
- ret = regmap_read(priv->tm_map, s->status, &code);
- if (ret)
- return ret;
- *temp = code_to_mdegC(code, s);
- return 0;
- } while (time_before(jiffies, timeout));
-
- return -ETIMEDOUT;
-}
+static const struct reg_field tsens_8960_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* No VERSION information */
+
+ /* CNTL */
+ [TSENS_EN] = REG_FIELD(CNTL_ADDR, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(CNTL_ADDR, 1, 1),
+	/* 8960 has 5 sensors, 8064 has 11, we only handle 5 */
+ [SENSOR_EN] = REG_FIELD(CNTL_ADDR, 3, 7),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ /* NO INTERRUPT ENABLE */
+
+ /* Single UPPER/LOWER TEMPERATURE THRESHOLD for all sensors */
+ [LOW_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 0, 7),
+ [UP_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 8, 15),
+	/* MIN_THRESH_0 and MAX_THRESH_0 are not present in the regfield list.
+	 * Recycle CRIT_THRESH_0 and 1 to set those registers to hardcoded
+	 * temperatures:
+ * MIN_THRESH_0 -> CRIT_THRESH_1
+ * MAX_THRESH_0 -> CRIT_THRESH_0
+ */
+ [CRIT_THRESH_1] = REG_FIELD(THRESHOLD_ADDR, 16, 23),
+ [CRIT_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 24, 31),
+
+ /* UPPER/LOWER INTERRUPT [CLEAR/STATUS] */
+ /* 1 == clear, 0 == normal operation */
+ [LOW_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 9, 9),
+ [UP_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 10, 10),
+
+ /* NO CRITICAL INTERRUPT SUPPORT on 8960 */
+
+ /* Sn_STATUS */
+ [LAST_TEMP_0] = REG_FIELD(S0_STATUS_OFF, 0, 7),
+ [LAST_TEMP_1] = REG_FIELD(S1_STATUS_OFF, 0, 7),
+ [LAST_TEMP_2] = REG_FIELD(S2_STATUS_OFF, 0, 7),
+ [LAST_TEMP_3] = REG_FIELD(S3_STATUS_OFF, 0, 7),
+ [LAST_TEMP_4] = REG_FIELD(S4_STATUS_OFF, 0, 7),
+ [LAST_TEMP_5] = REG_FIELD(S5_STATUS_OFF, 0, 7),
+ [LAST_TEMP_6] = REG_FIELD(S6_STATUS_OFF, 0, 7),
+ [LAST_TEMP_7] = REG_FIELD(S7_STATUS_OFF, 0, 7),
+ [LAST_TEMP_8] = REG_FIELD(S8_STATUS_OFF, 0, 7),
+ [LAST_TEMP_9] = REG_FIELD(S9_STATUS_OFF, 0, 7),
+ [LAST_TEMP_10] = REG_FIELD(S10_STATUS_OFF, 0, 7),
+
+ /* No VALID field on 8960 */
+ /* TSENS_INT_STATUS bits: 1 == threshold violated */
+ [MIN_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 0, 0),
+ [LOWER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 1, 1),
+ [UPPER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 2, 2),
+ /* No CRITICAL field on 8960 */
+ [MAX_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 3, 3),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(INT_STATUS_ADDR, 7, 7),
+};
static const struct tsens_ops ops_8960 = {
- .init = init_8960,
+ .init = init_common,
.calibrate = calibrate_8960,
- .get_temp = get_temp_8960,
+ .get_temp = get_temp_common,
.enable = enable_8960,
.disable = disable_8960,
.suspend = suspend_8960,
.resume = resume_8960,
};
+static struct tsens_features tsens_8960_feat = {
+ .ver_major = VER_0,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 0,
+ .max_sensors = 11,
+};
+
struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
+ .feat = &tsens_8960_feat,
+ .fields = tsens_8960_regfields,
};
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 4ffa2e2c0145..f136cb350238 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -190,6 +190,39 @@
#define BIT_APPEND 0x3
+/* eeprom layout data for mdm9607 */
+#define MDM9607_BASE0_MASK 0x000000ff
+#define MDM9607_BASE1_MASK 0x000ff000
+#define MDM9607_BASE0_SHIFT 0
+#define MDM9607_BASE1_SHIFT 12
+
+#define MDM9607_S0_P1_MASK 0x00003f00
+#define MDM9607_S1_P1_MASK 0x03f00000
+#define MDM9607_S2_P1_MASK 0x0000003f
+#define MDM9607_S3_P1_MASK 0x0003f000
+#define MDM9607_S4_P1_MASK 0x0000003f
+
+#define MDM9607_S0_P2_MASK 0x000fc000
+#define MDM9607_S1_P2_MASK 0xfc000000
+#define MDM9607_S2_P2_MASK 0x00000fc0
+#define MDM9607_S3_P2_MASK 0x00fc0000
+#define MDM9607_S4_P2_MASK 0x00000fc0
+
+#define MDM9607_S0_P1_SHIFT 8
+#define MDM9607_S1_P1_SHIFT 20
+#define MDM9607_S2_P1_SHIFT 0
+#define MDM9607_S3_P1_SHIFT 12
+#define MDM9607_S4_P1_SHIFT 0
+
+#define MDM9607_S0_P2_SHIFT 14
+#define MDM9607_S1_P2_SHIFT 26
+#define MDM9607_S2_P2_SHIFT 6
+#define MDM9607_S3_P2_SHIFT 18
+#define MDM9607_S4_P2_SHIFT 6
+
+#define MDM9607_CAL_SEL_MASK 0x00700000
+#define MDM9607_CAL_SEL_SHIFT 20
+
static int calibrate_8916(struct tsens_priv *priv)
{
int base0 = 0, base1 = 0, i;
@@ -452,7 +485,56 @@ static int calibrate_8974(struct tsens_priv *priv)
return 0;
}
-/* v0.1: 8916, 8939, 8974 */
+static int calibrate_9607(struct tsens_priv *priv)
+{
+ int base, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ mode = (qfprom_cdata[2] & MDM9607_CAL_SEL_MASK) >> MDM9607_CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
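+	/* TWO_PT_CALIB decodes p2 and then falls through to also decode p1 */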
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base = (qfprom_cdata[2] & MDM9607_BASE1_MASK) >> MDM9607_BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & MDM9607_S0_P2_MASK) >> MDM9607_S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & MDM9607_S1_P2_MASK) >> MDM9607_S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & MDM9607_S2_P2_MASK) >> MDM9607_S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & MDM9607_S3_P2_MASK) >> MDM9607_S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[2] & MDM9607_S4_P2_MASK) >> MDM9607_S4_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base + p2[i]) << 2);
+ fallthrough;
+ case ONE_PT_CALIB2:
+ base = (qfprom_cdata[0] & MDM9607_BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & MDM9607_S0_P1_MASK) >> MDM9607_S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & MDM9607_S1_P1_MASK) >> MDM9607_S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[1] & MDM9607_S2_P1_MASK) >> MDM9607_S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & MDM9607_S3_P1_MASK) >> MDM9607_S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[2] & MDM9607_S4_P1_MASK) >> MDM9607_S4_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = ((base + p1[i]) << 2);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+ kfree(qfprom_cdata);
+
+ return 0;
+}
+
+/* v0.1: 8916, 8939, 8974, 9607 */
static struct tsens_features tsens_v0_1_feat = {
.ver_major = VER_0_1,
@@ -540,3 +622,17 @@ struct tsens_plat_data data_8974 = {
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
};
+
+static const struct tsens_ops ops_9607 = {
+ .init = init_common,
+ .calibrate = calibrate_9607,
+ .get_temp = get_temp_common,
+};
+
+struct tsens_plat_data data_9607 = {
+ .num_sensors = 5,
+ .ops = &ops_9607,
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 4 },
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 3c19a3800c6d..573e261ccca7 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -380,11 +380,11 @@ static const struct tsens_ops ops_8976 = {
.get_temp = get_temp_tsens_valid,
};
-/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. */
+/* Valid for both MSM8956 and MSM8976. */
struct tsens_plat_data data_8976 = {
.num_sensors = 11,
.ops = &ops_8976,
- .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10},
+ .hw_ids = (unsigned int[]){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index d8ce3a687b80..4c7ebd1d3f9c 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
@@ -85,7 +86,8 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
__func__, i, p1[i], p2[i]);
- priv->sensor[i].slope = SLOPE_DEFAULT;
+ if (!priv->sensor[i].slope)
+ priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
/*
* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
@@ -515,6 +517,15 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
dev_dbg(priv->dev, "[%u] %s: no violation: %d\n",
hw_id, __func__, temp);
}
+
+ if (tsens_version(priv) < VER_0_1) {
+		/* Constraint: there is only one interrupt control register for
+		 * all 11 temperature sensors, so monitoring more than one
+		 * sensor via interrupts yields inconsistent results. To avoid
+		 * this, monitor only sensor 0, which is the master sensor.
+ */
+ break;
+ }
}
return IRQ_HANDLED;
@@ -530,6 +541,13 @@ static int tsens_set_trips(void *_sensor, int low, int high)
int high_val, low_val, cl_high, cl_low;
u32 hw_id = s->hw_id;
+ if (tsens_version(priv) < VER_0_1) {
+		/* Pre-v0.1 IP had a single register for each type of interrupt
+		 * and for the thresholds.
+		 */
+ hw_id = 0;
+ }
+
dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n",
hw_id, __func__, low, high);
@@ -584,18 +602,21 @@ int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)
u32 valid;
int ret;
- ret = regmap_field_read(priv->rf[valid_idx], &valid);
- if (ret)
- return ret;
- while (!valid) {
- /* Valid bit is 0 for 6 AHB clock cycles.
- * At 19.2MHz, 1 AHB clock is ~60ns.
- * We should enter this loop very, very rarely.
- */
- ndelay(400);
+ /* VER_0 doesn't have VALID bit */
+ if (tsens_version(priv) >= VER_0_1) {
ret = regmap_field_read(priv->rf[valid_idx], &valid);
if (ret)
return ret;
+ while (!valid) {
+ /* Valid bit is 0 for 6 AHB clock cycles.
+ * At 19.2MHz, 1 AHB clock is ~60ns.
+ * We should enter this loop very, very rarely.
+ */
+ ndelay(400);
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ }
}
/* Valid bit is set, OK to read the temperature */
@@ -608,15 +629,29 @@ int get_temp_common(const struct tsens_sensor *s, int *temp)
{
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
- int last_temp = 0, ret;
+ int last_temp = 0, ret, trdy;
+ unsigned long timeout;
- ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp);
- if (ret)
- return ret;
+ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+ do {
+ if (tsens_version(priv) == VER_0) {
+ ret = regmap_field_read(priv->rf[TRDY], &trdy);
+ if (ret)
+ return ret;
+ if (!trdy)
+ continue;
+ }
- *temp = code_to_degc(last_temp, s) * 1000;
+ ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp);
+ if (ret)
+ return ret;
- return 0;
+ *temp = code_to_degc(last_temp, s) * 1000;
+
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
}
#ifdef CONFIG_DEBUG_FS
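For reference, the jiffies-based poll added to get_temp_common() above is the stock kernel idiom for a bounded busy-wait on a ready bit. A minimal sketch, with a hypothetical hw_ready() helper standing in for the regmap_field_read(priv->rf[TRDY], ...) call:

	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	static bool hw_ready(void);	/* hypothetical: reads the TRDY bit */

	static int wait_hw_ready(unsigned int timeout_us)
	{
		unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us);

		do {
			if (hw_ready())
				return 0;	/* sample may now be read */
			cpu_relax();		/* polite busy-wait */
		} while (time_before(jiffies, timeout));

		return -ETIMEDOUT;		/* ready bit never asserted */
	}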
@@ -738,25 +773,42 @@ int __init init_common(struct tsens_priv *priv)
priv->tm_offset = 0x1000;
}
- res = platform_get_resource(op, IORESOURCE_MEM, 0);
- tm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(tm_base)) {
- ret = PTR_ERR(tm_base);
- goto err_put_device;
+ if (tsens_version(priv) >= VER_0_1) {
+ res = platform_get_resource(op, IORESOURCE_MEM, 0);
+ tm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(tm_base)) {
+ ret = PTR_ERR(tm_base);
+ goto err_put_device;
+ }
+
+ priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
+ } else { /* VER_0 shares the GCC registers via a syscon */
+ struct device *parent = priv->dev->parent;
+
+ if (parent)
+ priv->tm_map = syscon_node_to_regmap(parent->of_node);
}
- priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
- if (IS_ERR(priv->tm_map)) {
- ret = PTR_ERR(priv->tm_map);
+ if (IS_ERR_OR_NULL(priv->tm_map)) {
+ if (!priv->tm_map)
+ ret = -ENODEV;
+ else
+ ret = PTR_ERR(priv->tm_map);
goto err_put_device;
}
+ /* VER_0 has only tm_map */
+ if (!priv->srot_map)
+ priv->srot_map = priv->tm_map;
+
if (tsens_version(priv) > VER_0_1) {
for (i = VER_MAJOR; i <= VER_STEP; i++) {
priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
priv->fields[i]);
- if (IS_ERR(priv->rf[i]))
- return PTR_ERR(priv->rf[i]);
+ if (IS_ERR(priv->rf[i])) {
+ ret = PTR_ERR(priv->rf[i]);
+ goto err_put_device;
+ }
}
ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
if (ret)
@@ -769,6 +821,10 @@ int __init init_common(struct tsens_priv *priv)
ret = PTR_ERR(priv->rf[TSENS_EN]);
goto err_put_device;
}
+ /* In VER_0 the TSENS block needs to be explicitly enabled */
+ if (tsens_version(priv) == VER_0)
+ regmap_field_write(priv->rf[TSENS_EN], 1);
+
ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
if (ret)
goto err_put_device;
@@ -791,6 +847,19 @@ int __init init_common(struct tsens_priv *priv)
goto err_put_device;
}
+ priv->rf[TSENS_SW_RST] =
+ devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[TSENS_SW_RST]);
+ if (IS_ERR(priv->rf[TSENS_SW_RST])) {
+ ret = PTR_ERR(priv->rf[TSENS_SW_RST]);
+ goto err_put_device;
+ }
+
+ priv->rf[TRDY] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[TRDY]);
+ if (IS_ERR(priv->rf[TRDY])) {
+ ret = PTR_ERR(priv->rf[TRDY]);
+ goto err_put_device;
+ }
+
/* This loop might need changes if enum regfield_ids is reordered */
for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) {
for (i = 0; i < priv->feat->max_sensors; i++) {
@@ -806,7 +875,7 @@ int __init init_common(struct tsens_priv *priv)
}
}
- if (priv->feat->crit_int) {
+ if (priv->feat->crit_int || tsens_version(priv) < VER_0_1) {
/* Loop might need changes if enum regfield_ids is reordered */
for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) {
for (i = 0; i < priv->feat->max_sensors; i++) {
@@ -844,7 +913,11 @@ int __init init_common(struct tsens_priv *priv)
}
spin_lock_init(&priv->ul_lock);
- tsens_enable_irq(priv);
+
+ /* VER_0 interrupts don't need to be enabled here */
+ if (tsens_version(priv) >= VER_0_1)
+ tsens_enable_irq(priv);
+
tsens_debug_init(op);
err_put_device:
@@ -895,6 +968,12 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
static const struct of_device_id tsens_table[] = {
{
+ .compatible = "qcom,ipq8064-tsens",
+ .data = &data_8960,
+ }, {
+ .compatible = "qcom,mdm9607-tsens",
+ .data = &data_9607,
+ }, {
.compatible = "qcom,msm8916-tsens",
.data = &data_8916,
}, {
@@ -943,10 +1022,19 @@ static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
if (irq == -ENXIO)
ret = 0;
} else {
- ret = devm_request_threaded_irq(&pdev->dev, irq,
- NULL, thread_fn,
- IRQF_ONESHOT,
- dev_name(&pdev->dev), priv);
+ /* VER_0 interrupt is TRIGGER_RISING, VER_0_1 and up is ONESHOT */
+ if (tsens_version(priv) == VER_0)
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ thread_fn, NULL,
+ IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev),
+ priv);
+ else
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ thread_fn, IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ priv);
+
if (ret)
dev_err(&pdev->dev, "%s: failed to get irq\n",
__func__);
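The two request paths above differ only in which devm_request_threaded_irq() slot the handler occupies: VER_0 runs it as the primary (hard-irq) handler with IRQF_TRIGGER_RISING, while newer IPs pass a NULL primary handler so the handler runs in a kernel thread, which requires IRQF_ONESHOT to keep the line masked in the meantime. A sketch of the two call shapes, with a hypothetical my_handler():

	#include <linux/device.h>
	#include <linux/interrupt.h>

	static irqreturn_t my_handler(int irq, void *data)	/* placeholder */
	{
		return IRQ_HANDLED;
	}

	static int request_my_irq(struct device *dev, int irq, bool hardirq,
				  void *data)
	{
		if (hardirq)	/* handler runs in interrupt context */
			return devm_request_threaded_irq(dev, irq, my_handler,
							 NULL,
							 IRQF_TRIGGER_RISING,
							 dev_name(dev), data);

		/* handler runs in a thread; ONESHOT masks the line until done */
		return devm_request_threaded_irq(dev, irq, NULL, my_handler,
						 IRQF_ONESHOT, dev_name(dev),
						 data);
	}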
@@ -975,6 +1063,19 @@ static int tsens_register(struct tsens_priv *priv)
priv->ops->enable(priv, i);
}
+ /* VER_0 requires the MIN and MAX THRESH to be set. These two
+ * registers are programmed as follows:
+ * - CRIT_THRESH_0 holds the MAX THRESH, hardcoded to 120°C
+ * - CRIT_THRESH_1 holds the MIN THRESH, hardcoded to 0°C
+ */
+ if (tsens_version(priv) < VER_0_1) {
+ regmap_field_write(priv->rf[CRIT_THRESH_0],
+ tsens_mC_to_hw(priv->sensor, 120000));
+
+ regmap_field_write(priv->rf[CRIT_THRESH_1],
+ tsens_mC_to_hw(priv->sensor, 0));
+ }
+
ret = tsens_register_irq(priv, "uplow", tsens_irq_thread);
if (ret < 0)
return ret;
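The VER_0 branch in init_common() above obtains its regmap from the parent's syscon instead of ioremapping a private region. A minimal sketch of that lookup, assuming the device node sits under a syscon-compatible parent:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	static struct regmap *parent_syscon_regmap(struct device *dev)
	{
		struct regmap *map = NULL;

		if (dev->parent)
			map = syscon_node_to_regmap(dev->parent->of_node);

		/* ERR_PTR() on lookup failure, NULL when there is no parent */
		if (IS_ERR_OR_NULL(map))
			return map ? map : ERR_PTR(-ENODEV);

		return map;
	}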
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index f40b625f897e..1471a2c00f15 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -13,6 +13,7 @@
#define CAL_DEGC_PT2 120
#define SLOPE_FACTOR 1000
#define SLOPE_DEFAULT 3200
+#define TIMEOUT_US 100
#define THRESHOLD_MAX_ADC_CODE 0x3ff
#define THRESHOLD_MIN_ADC_CODE 0x0
@@ -25,7 +26,8 @@ struct tsens_priv;
/* IP version numbers in ascending order */
enum tsens_ver {
- VER_0_1 = 0,
+ VER_0 = 0,
+ VER_0_1,
VER_1_X,
VER_2_X,
};
@@ -585,7 +587,7 @@ int get_temp_common(const struct tsens_sensor *s, int *temp);
extern struct tsens_plat_data data_8960;
/* TSENS v0.1 targets */
-extern struct tsens_plat_data data_8916, data_8939, data_8974;
+extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
/* TSENS v1 targets */
extern struct tsens_plat_data data_tsens_v1, data_8976;
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 75c69fe6e955..e1e412348076 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -60,7 +60,7 @@
#define MCELSIUS(temp) ((temp) * 1000)
#define GEN3_FUSE_MASK 0xFFF
-#define TSC_MAX_NUM 4
+#define TSC_MAX_NUM 5
/* default THCODE values if FUSEs are missing */
static const int thcodes[TSC_MAX_NUM][3] = {
@@ -68,6 +68,7 @@ static const int thcodes[TSC_MAX_NUM][3] = {
{ 3393, 2795, 2216 },
{ 3389, 2805, 2237 },
{ 3415, 2694, 2195 },
+ { 3356, 2724, 2244 },
};
/* Structure for thermal temperature calculation */
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 8c80bd06dd9f..d9cd23cbb671 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -300,7 +300,7 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev)
* or 0x8xx, so they won't be away from the default value
* for a lot.
*
- * So here we do not return error if the calibartion data is
+ * So here we do not return error if the calibration data is
* not available, except the probe needs deferring.
*/
goto out;
@@ -418,7 +418,7 @@ static int sun8i_h3_thermal_init(struct ths_device *tmdev)
}
/*
- * Without this undocummented value, the returned temperatures would
+ * Without this undocumented value, the returned temperatures would
* be higher than real ones by about 20C.
*/
#define SUN50I_H6_CTRL0_UNK 0x0000002f
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 66e0639da4bf..8e303e9d1dc0 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -2118,7 +2118,6 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
struct tegra_soctherm *tegra;
struct thermal_zone_device *z;
struct tsensor_shared_calib shared_calib;
- struct resource *res;
struct tegra_soctherm_soc *soc;
unsigned int i;
int err;
@@ -2140,26 +2139,20 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
tegra->soc = soc;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "soctherm-reg");
- tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->regs = devm_platform_ioremap_resource_byname(pdev, "soctherm-reg");
if (IS_ERR(tegra->regs)) {
dev_err(&pdev->dev, "can't get soctherm registers");
return PTR_ERR(tegra->regs);
}
if (!tegra->soc->use_ccroc) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "car-reg");
- tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->clk_regs = devm_platform_ioremap_resource_byname(pdev, "car-reg");
if (IS_ERR(tegra->clk_regs)) {
dev_err(&pdev->dev, "can't get car clk registers");
return PTR_ERR(tegra->clk_regs);
}
} else {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "ccroc-reg");
- tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->ccroc_regs = devm_platform_ioremap_resource_byname(pdev, "ccroc-reg");
if (IS_ERR(tegra->ccroc_regs)) {
dev_err(&pdev->dev, "can't get ccroc registers");
return PTR_ERR(tegra->ccroc_regs);
@@ -2195,7 +2188,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (err)
return err;
- /* calculate tsensor calibaration data */
+ /* calculate tsensor calibration data */
for (i = 0; i < soc->num_tsensors; ++i) {
err = tegra_calc_tsensor_calib(&soc->tsensors[i],
&shared_calib,
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 996c038f83a4..d20b25f40d19 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -561,24 +561,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
-/**
- * thermal_notify_framework - Sensor drivers use this API to notify framework
- * @tz: thermal zone device
- * @trip: indicates which trip point has been crossed
- *
- * This function handles the trip events from sensor drivers. It starts
- * throttling the cooling devices according to the policy configured.
- * For CRITICAL and HOT trip points, this notifies the respective drivers,
- * and does actual throttling for other trip points i.e ACTIVE and PASSIVE.
- * The throttling policy is based on the configured platform data; if no
- * platform data is provided, this uses the step_wise throttling policy.
- */
-void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
-{
- handle_thermal_trip(tz, trip);
-}
-EXPORT_SYMBOL_GPL(thermal_notify_framework);
-
static void thermal_zone_device_check(struct work_struct *work)
{
struct thermal_zone_device *tz = container_of(work, struct
@@ -960,10 +942,7 @@ __thermal_cooling_device_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
- int result;
-
- if (type && strlen(type) >= THERMAL_NAME_LENGTH)
- return ERR_PTR(-EINVAL);
+ int ret;
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
!ops->set_cur_state)
@@ -973,14 +952,17 @@ __thermal_cooling_device_register(struct device_node *np,
if (!cdev)
return ERR_PTR(-ENOMEM);
- result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
- if (result < 0) {
- kfree(cdev);
- return ERR_PTR(result);
+ ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out_kfree_cdev;
+ cdev->id = ret;
+
+ cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+ if (!cdev->type) {
+ ret = -ENOMEM;
+ goto out_ida_remove;
}
- cdev->id = result;
- strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->np = np;
@@ -990,12 +972,9 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->devdata = devdata;
thermal_cooling_device_setup_sysfs(cdev);
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
- result = device_register(&cdev->device);
- if (result) {
- ida_simple_remove(&thermal_cdev_ida, cdev->id);
- put_device(&cdev->device);
- return ERR_PTR(result);
- }
+ ret = device_register(&cdev->device);
+ if (ret)
+ goto out_kfree_type;
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -1013,6 +992,15 @@ __thermal_cooling_device_register(struct device_node *np,
mutex_unlock(&thermal_list_lock);
return cdev;
+
+out_kfree_type:
+ kfree(cdev->type);
+ put_device(&cdev->device);
+out_ida_remove:
+ ida_simple_remove(&thermal_cdev_ida, cdev->id);
+out_kfree_cdev:
+ kfree(cdev);
+ return ERR_PTR(ret);
}
/**
@@ -1171,6 +1159,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
ida_simple_remove(&thermal_cdev_ida, cdev->id);
device_del(&cdev->device);
thermal_cooling_device_destroy_sysfs(cdev);
+ kfree(cdev->type);
put_device(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
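The registration rework above replaces early returns with a single unwind ladder, so each acquired resource (ida slot, kstrdup()'d type string, device reference) is released in reverse order on failure. The shape in miniature, with placeholder struct foo and foo_ida:

	#include <linux/err.h>
	#include <linux/idr.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static DEFINE_IDA(foo_ida);

	struct foo {
		int id;
		char *type;
	};

	static struct foo *foo_register(const char *type)
	{
		struct foo *f;
		int ret;

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);

		ret = ida_simple_get(&foo_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto out_kfree;
		f->id = ret;

		f->type = kstrdup(type ? type : "", GFP_KERNEL);
		if (!f->type) {
			ret = -ENOMEM;
			goto out_ida;
		}

		return f;

	out_ida:	/* labels release in reverse order of acquisition */
		ida_simple_remove(&foo_ida, f->id);
	out_kfree:
		kfree(f);
		return ERR_PTR(ret);
	}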
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 86b8cef7310e..726e327b4205 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -66,6 +66,7 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
}
void thermal_cdev_update(struct thermal_cooling_device *);
+void __thermal_cdev_update(struct thermal_cooling_device *cdev);
/**
* struct thermal_trip - representation of a point in temperature domain
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 7f50f412e02a..3edd047e144f 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -192,18 +192,11 @@ static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
thermal_cooling_device_stats_update(cdev, target);
}
-void thermal_cdev_update(struct thermal_cooling_device *cdev)
+void __thermal_cdev_update(struct thermal_cooling_device *cdev)
{
struct thermal_instance *instance;
unsigned long target = 0;
- mutex_lock(&cdev->lock);
- /* cooling device is updated*/
- if (cdev->updated) {
- mutex_unlock(&cdev->lock);
- return;
- }
-
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
dev_dbg(&cdev->device, "zone%d->target=%lu\n",
@@ -216,11 +209,25 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
thermal_cdev_set_cur_state(cdev, target);
- cdev->updated = true;
- mutex_unlock(&cdev->lock);
trace_cdev_update(cdev, target);
dev_dbg(&cdev->device, "set to state %lu\n", target);
}
+
+/**
+ * thermal_cdev_update - update cooling device state if needed
+ * @cdev: pointer to struct thermal_cooling_device
+ *
+ * Update the cooling device state if there is a need.
+ */
+void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{
+ mutex_lock(&cdev->lock);
+ if (!cdev->updated) {
+ __thermal_cdev_update(cdev);
+ cdev->updated = true;
+ }
+ mutex_unlock(&cdev->lock);
+}
EXPORT_SYMBOL(thermal_cdev_update);
/**
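Factoring thermal_cdev_update() into a locked wrapper plus a lock-free __thermal_cdev_update() lets code that already holds cdev->lock (e.g. a governor) call the double-underscore variant without deadlocking. The pattern in miniature, with a placeholder struct bar:

	#include <linux/mutex.h>

	struct bar {
		struct mutex lock;
		bool updated;
	};

	static void __bar_update(struct bar *b)
	{
		/* caller must hold b->lock; does the real work here */
	}

	static void bar_update(struct bar *b)
	{
		mutex_lock(&b->lock);
		if (!b->updated) {
			__bar_update(b);
			b->updated = true;
		}
		mutex_unlock(&b->lock);
	}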
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index d0bdf1ea3331..ded1dd0d4ef7 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -54,11 +54,8 @@ static int thermal_mmio_probe(struct platform_device *pdev)
resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
- if (IS_ERR(sensor->mmio_base)) {
- dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
- PTR_ERR(sensor->mmio_base));
+ if (IS_ERR(sensor->mmio_base))
return PTR_ERR(sensor->mmio_base);
- }
sensor_init_func = device_get_match_data(&pdev->dev);
if (sensor_init_func) {
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 69ef12f852b7..5b76f9a1280d 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
count = of_count_phandle_with_args(np, "cooling-device",
"#cooling-cells");
- if (!count) {
+ if (count <= 0) {
pr_err("Add a cooling_device property with at least one device\n");
+ ret = -ENOENT;
goto end;
}
__tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
- if (!__tcbp)
+ if (!__tcbp) {
+ ret = -ENOMEM;
goto end;
+ }
for (i = 0; i < count; i++) {
ret = of_parse_phandle_with_args(np, "cooling-device",
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 8a3646e26ddd..ebe7cb70bfb6 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -9,30 +9,29 @@
* Eduardo Valentin <eduardo.valentin@ti.com>
*/
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu_pm.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sys_soc.h>
-#include <linux/reboot.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/cpu_pm.h>
-#include <linux/device.h>
-#include <linux/pm_runtime.h>
-#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
#include "ti-bandgap.h"
@@ -1143,14 +1142,10 @@ static int ti_bandgap_restore_ctxt(struct ti_bandgap *bgp)
for (i = 0; i < bgp->conf->sensor_count; i++) {
struct temp_sensor_registers *tsr;
struct temp_sensor_regval *rval;
- u32 val = 0;
rval = &bgp->regval[i];
tsr = bgp->conf->sensors[i].registers;
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- val = ti_bandgap_readl(bgp, tsr->bgap_counter);
-
if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG))
ti_bandgap_writel(bgp, rval->tshut_threshold,
tsr->tshut_threshold);
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index ffd1e098bfd2..a503c1b2bfd9 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -14,6 +14,7 @@ config VDPA_SIM
depends on RUNTIME_TESTING_MENU && HAS_DMA
select DMA_OPS
select VHOST_RING
+ select IOMMU_IOVA
help
Enable this module to support vDPA device simulators. These devices
are used for testing, prototyping and development of vDPA.
@@ -25,6 +26,13 @@ config VDPA_SIM_NET
help
vDPA networking device simulator which loops TX traffic back to RX.
+config VDPA_SIM_BLOCK
+ tristate "vDPA simulator for block device"
+ depends on VDPA_SIM
+ help
+ vDPA block device simulator which terminates IO requests in a
+ memory buffer.
+
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
@@ -52,4 +60,11 @@ config MLX5_VDPA_NET
be executed by the hardware. It also supports a variety of stateless
offloads depending on the actual device used and firmware version.
+config VP_VDPA
+ tristate "Virtio PCI bridge vDPA driver"
+ select VIRTIO_PCI_LIB
+ depends on PCI_MSI
+ help
+ This kernel module bridges a virtio PCI device to the vDPA bus.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index d160e9b63a66..67fe7f3d6943 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
+obj-$(CONFIG_VP_VDPA) += virtio_pci/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index f2a128e56de5..1a661ab45af5 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -202,10 +202,11 @@ static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
ifcvf_get_status(hw);
}
-u64 ifcvf_get_features(struct ifcvf_hw *hw)
+u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
u32 features_lo, features_hi;
+ u64 features;
ifc_iowrite32(0, &cfg->device_feature_select);
features_lo = ifc_ioread32(&cfg->device_feature);
@@ -213,7 +214,26 @@ u64 ifcvf_get_features(struct ifcvf_hw *hw)
ifc_iowrite32(1, &cfg->device_feature_select);
features_hi = ifc_ioread32(&cfg->device_feature);
- return ((u64)features_hi << 32) | features_lo;
+ features = ((u64)features_hi << 32) | features_lo;
+
+ return features;
+}
+
+u64 ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ return hw->hw_features;
+}
+
+int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
+
+ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
+ IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 64696d63fe07..0111bfdeb342 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -15,15 +15,26 @@
#include <linux/pci_regs.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_net.h>
+#include <uapi/linux/virtio_blk.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_pci.h>
-#define IFCVF_VENDOR_ID 0x1AF4
-#define IFCVF_DEVICE_ID 0x1041
-#define IFCVF_SUBSYS_VENDOR_ID 0x8086
-#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+#define N3000_VENDOR_ID 0x1AF4
+#define N3000_DEVICE_ID 0x1041
+#define N3000_SUBSYS_VENDOR_ID 0x8086
+#define N3000_SUBSYS_DEVICE_ID 0x001A
-#define IFCVF_SUPPORTED_FEATURES \
+#define C5000X_PL_VENDOR_ID 0x1AF4
+#define C5000X_PL_DEVICE_ID 0x1000
+#define C5000X_PL_SUBSYS_VENDOR_ID 0x8086
+#define C5000X_PL_SUBSYS_DEVICE_ID 0x0001
+
+#define C5000X_PL_BLK_VENDOR_ID 0x1AF4
+#define C5000X_PL_BLK_DEVICE_ID 0x1001
+#define C5000X_PL_BLK_SUBSYS_VENDOR_ID 0x8086
+#define C5000X_PL_BLK_SUBSYS_DEVICE_ID 0x0002
+
+#define IFCVF_NET_SUPPORTED_FEATURES \
((1ULL << VIRTIO_NET_F_MAC) | \
(1ULL << VIRTIO_F_ANY_LAYOUT) | \
(1ULL << VIRTIO_F_VERSION_1) | \
@@ -78,6 +89,8 @@ struct ifcvf_hw {
void __iomem *notify_base;
u32 notify_off_multiplier;
u64 req_features;
+ u64 hw_features;
+ u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *net_cfg;
struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
@@ -116,7 +129,10 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
+int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
+int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index d555a6a5d1ba..ab0ab5cf0f6e 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -14,7 +14,6 @@
#include <linux/sysfs.h>
#include "ifcvf_base.h"
-#define VERSION_STRING "0.1"
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"
@@ -169,10 +168,23 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+
u64 features;
- features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;
+ switch (vf->dev_type) {
+ case VIRTIO_ID_NET:
+ features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
+ break;
+ case VIRTIO_ID_BLOCK:
+ features = ifcvf_get_features(vf);
+ break;
+ default:
+ features = 0;
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
+ }
return features;
}
@@ -180,6 +192,11 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ int ret;
+
+ ret = ifcvf_verify_min_features(vf, features);
+ if (ret)
+ return ret;
vf->req_features = features;
@@ -319,12 +336,17 @@ static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
- return VIRTIO_ID_NET;
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->dev_type;
}
static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
- return IFCVF_SUBSYS_VENDOR_ID;
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ return pdev->subsystem_vendor;
}
static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
@@ -332,6 +354,28 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
return IFCVF_QUEUE_ALIGNMENT;
}
+static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct pci_dev *pdev = adapter->pdev;
+ size_t size;
+
+ switch (vf->dev_type) {
+ case VIRTIO_ID_NET:
+ size = sizeof(struct virtio_net_config);
+ break;
+ case VIRTIO_ID_BLOCK:
+ size = sizeof(struct virtio_blk_config);
+ break;
+ default:
+ size = 0;
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
+ }
+
+ return size;
+}
+
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
unsigned int offset,
void *buf, unsigned int len)
@@ -392,6 +436,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_device_id = ifcvf_vdpa_get_device_id,
.get_vendor_id = ifcvf_vdpa_get_vendor_id,
.get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_config_size = ifcvf_vdpa_get_config_size,
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
.set_config_cb = ifcvf_vdpa_set_config_cb,
@@ -441,6 +486,19 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, adapter);
vf = &adapter->vf;
+
+ /* This driver drives both modern virtio devices and transitional
+ * devices in modern mode.
+ * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
+ * devices and transitional devices in legacy mode will not work for
+ * vDPA; this driver does not drive devices with a legacy interface.
+ */
+ if (pdev->device < 0x1040)
+ vf->dev_type = pdev->subsystem_device;
+ else
+ vf->dev_type = pdev->device - 0x1040;
+
vf->base = pcim_iomap_table(pdev);
adapter->pdev = pdev;
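The dev_type assignment above follows the virtio-pci ID scheme: modern devices use PCI device ID 0x1040 plus the virtio device ID, while transitional devices (IDs below 0x1040) expose the virtio ID in the PCI subsystem device ID. As a standalone helper:

	#include <linux/pci.h>

	/* Virtio spec: modern IDs are 0x1040 + virtio-ID; transitional
	 * devices (0x1000..0x103f) carry the virtio ID in subsystem_device. */
	static u32 virtio_pci_dev_type(struct pci_dev *pdev)
	{
		if (pdev->device < 0x1040)
			return pdev->subsystem_device;

		return pdev->device - 0x1040;
	}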
@@ -455,6 +513,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
vf->vring[i].irq = -EINVAL;
+ vf->hw_features = ifcvf_get_hw_features(vf);
+
ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
if (ret) {
IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
@@ -476,10 +536,19 @@ static void ifcvf_remove(struct pci_dev *pdev)
}
static struct pci_device_id ifcvf_pci_ids[] = {
- { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
- IFCVF_DEVICE_ID,
- IFCVF_SUBSYS_VENDOR_ID,
- IFCVF_SUBSYS_DEVICE_ID) },
+ { PCI_DEVICE_SUB(N3000_VENDOR_ID,
+ N3000_DEVICE_ID,
+ N3000_SUBSYS_VENDOR_ID,
+ N3000_SUBSYS_DEVICE_ID) },
+ { PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID,
+ C5000X_PL_DEVICE_ID,
+ C5000X_PL_SUBSYS_VENDOR_ID,
+ C5000X_PL_SUBSYS_DEVICE_ID) },
+ { PCI_DEVICE_SUB(C5000X_PL_BLK_VENDOR_ID,
+ C5000X_PL_BLK_DEVICE_ID,
+ C5000X_PL_BLK_SUBSYS_VENDOR_ID,
+ C5000X_PL_BLK_SUBSYS_DEVICE_ID) },
+
{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
@@ -494,4 +563,3 @@ static struct pci_driver ifcvf_driver = {
module_pci_driver(ifcvf_driver);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(VERSION_STRING);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 4d2809c7d4e3..189e4385df40 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1809,6 +1809,11 @@ err_setup:
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
+static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
+{
+ return sizeof(struct virtio_net_config);
+}
+
static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
unsigned int len)
{
@@ -1895,6 +1900,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vendor_id = mlx5_vdpa_get_vendor_id,
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
+ .get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
@@ -1974,23 +1980,32 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
}
}
-static int mlx5v_probe(struct auxiliary_device *adev,
- const struct auxiliary_device_id *id)
+struct mlx5_vdpa_mgmtdev {
+ struct vdpa_mgmt_dev mgtdev;
+ struct mlx5_adev *madev;
+ struct mlx5_vdpa_net *ndev;
+};
+
+static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
{
- struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
+ struct mlx5_core_dev *mdev;
u32 max_vqs;
int err;
+ if (mgtdev->ndev)
+ return -ENOSPC;
+
+ mdev = mgtdev->madev->mdev;
/* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- NULL);
+ name);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2017,11 +2032,12 @@ static int mlx5v_probe(struct auxiliary_device *adev,
if (err)
goto err_res;
- err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
+ mvdev->vdev.mdev = &mgtdev->mgtdev;
+ err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
if (err)
goto err_reg;
- dev_set_drvdata(&adev->dev, ndev);
+ mgtdev->ndev = ndev;
return 0;
err_reg:
@@ -2034,11 +2050,62 @@ err_mtu:
return err;
}
+static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
+{
+ struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
+
+ _vdpa_unregister_device(dev);
+ mgtdev->ndev = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops mdev_ops = {
+ .dev_add = mlx5_vdpa_dev_add,
+ .dev_del = mlx5_vdpa_dev_del,
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static int mlx5v_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5_vdpa_mgmtdev *mgtdev;
+ int err;
+
+ mgtdev = kzalloc(sizeof(*mgtdev), GFP_KERNEL);
+ if (!mgtdev)
+ return -ENOMEM;
+
+ mgtdev->mgtdev.ops = &mdev_ops;
+ mgtdev->mgtdev.device = mdev->device;
+ mgtdev->mgtdev.id_table = id_table;
+ mgtdev->madev = madev;
+
+ err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
+ if (err)
+ goto reg_err;
+
+ dev_set_drvdata(&adev->dev, mgtdev);
+
+ return 0;
+
+reg_err:
+ kfree(mgtdev);
+ return err;
+}
+
static void mlx5v_remove(struct auxiliary_device *adev)
{
- struct mlx5_vdpa_dev *mvdev = dev_get_drvdata(&adev->dev);
+ struct mlx5_vdpa_mgmtdev *mgtdev;
- vdpa_unregister_device(&mvdev->vdev);
+ mgtdev = dev_get_drvdata(&adev->dev);
+ vdpa_mgmtdev_unregister(&mgtdev->mgtdev);
+ kfree(mgtdev);
}
static const struct auxiliary_device_id mlx5v_id_table[] = {
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 5cffce67cab0..bb3f1d1f0422 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -75,8 +75,8 @@ static void vdpa_release_dev(struct device *d)
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
*
- * Returns an error when parent/config/dma_dev is not set or fail to get
- * ida.
+ * Return: Returns an error when parent/config/dma_dev is not set or it
+ * fails to get an ida.
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
@@ -157,7 +157,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
* @vdev: the vdpa device to be registered to vDPA bus
* @nvqs: number of virtqueues supported by this device
*
- * Returns an error when fail to add device to vDPA bus
+ * Return: Returns an error when it fails to add the device to the vDPA bus
*/
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
@@ -174,7 +174,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
* @vdev: the vdpa device to be registered to vDPA bus
* @nvqs: number of virtqueues supported by this device
*
- * Returns an error when fail to add to vDPA bus
+ * Return: Returns an error when fail to add to vDPA bus
*/
int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(vdpa_unregister_device);
* @drv: the vdpa device driver to be registered
* @owner: module owner of the driver
*
- * Returns an err when fail to do the registration
+ * Return: Returns an error when it fails to do the registration
*/
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
@@ -245,6 +245,8 @@ EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
* @mdev: Pointer to vdpa management device
* vdpa_mgmtdev_register() register a vdpa management device which supports
* vdpa device management.
+ * Return: Returns 0 on success, or an error when the required callback
+ * ops are not initialized.
*/
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile
index 79d4536d347e..d458103302f2 100644
--- a/drivers/vdpa/vdpa_sim/Makefile
+++ b/drivers/vdpa/vdpa_sim/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o
obj-$(CONFIG_VDPA_SIM_NET) += vdpa_sim_net.o
+obj-$(CONFIG_VDPA_SIM_BLOCK) += vdpa_sim_blk.o
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 5b6b2f87d40c..98f793bc9376 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -17,6 +17,7 @@
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
+#include <linux/iova.h>
#include "vdpa_sim.h"
@@ -128,30 +129,57 @@ static int dir_to_perm(enum dma_data_direction dir)
return perm;
}
+static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
+ size_t size, unsigned int perm)
+{
+ struct iova *iova;
+ dma_addr_t dma_addr;
+ int ret;
+
+ /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
+ iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ dma_addr = iova_dma_addr(&vdpasim->iova, iova);
+
+ spin_lock(&vdpasim->iommu_lock);
+ ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1, (u64)paddr, perm);
+ spin_unlock(&vdpasim->iommu_lock);
+
+ if (ret) {
+ __free_iova(&vdpasim->iova, iova);
+ return DMA_MAPPING_ERROR;
+ }
+
+ return dma_addr;
+}
+
+static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
+ size_t size)
+{
+ spin_lock(&vdpasim->iommu_lock);
+ vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
+
+ free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
+}
+
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
- int ret, perm = dir_to_perm(dir);
+ phys_addr_t paddr = page_to_phys(page) + offset;
+ int perm = dir_to_perm(dir);
if (perm < 0)
return DMA_MAPPING_ERROR;
- /* For simplicity, use identical mapping to avoid e.g iova
- * allocator.
- */
- spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
- pa, dir_to_perm(dir));
- spin_unlock(&vdpasim->iommu_lock);
- if (ret)
- return DMA_MAPPING_ERROR;
-
- return (dma_addr_t)(pa);
+ return vdpasim_map_range(vdpasim, paddr, size, perm);
}
static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
@@ -159,12 +187,8 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(iommu, (u64)dma_addr,
- (u64)dma_addr + size - 1);
- spin_unlock(&vdpasim->iommu_lock);
+ vdpasim_unmap_range(vdpasim, dma_addr, size);
}
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -172,27 +196,22 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- void *addr = kmalloc(size, flag);
- int ret;
+ phys_addr_t paddr;
+ void *addr;
- spin_lock(&vdpasim->iommu_lock);
+ addr = kmalloc(size, flag);
if (!addr) {
*dma_addr = DMA_MAPPING_ERROR;
- } else {
- u64 pa = virt_to_phys(addr);
-
- ret = vhost_iotlb_add_range(iommu, (u64)pa,
- (u64)pa + size - 1,
- pa, VHOST_MAP_RW);
- if (ret) {
- *dma_addr = DMA_MAPPING_ERROR;
- kfree(addr);
- addr = NULL;
- } else
- *dma_addr = (dma_addr_t)pa;
+ return NULL;
+ }
+
+ paddr = virt_to_phys(addr);
+
+ *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
+ if (*dma_addr == DMA_MAPPING_ERROR) {
+ kfree(addr);
+ return NULL;
}
- spin_unlock(&vdpasim->iommu_lock);
return addr;
}
@@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
struct vdpasim *vdpasim = dev_to_sim(dev);
- struct vhost_iotlb *iommu = vdpasim->iommu;
- spin_lock(&vdpasim->iommu_lock);
- vhost_iotlb_del_range(iommu, (u64)dma_addr,
- (u64)dma_addr + size - 1);
- spin_unlock(&vdpasim->iommu_lock);
+ vdpasim_unmap_range(vdpasim, dma_addr, size);
- kfree(phys_to_virt((uintptr_t)dma_addr));
+ kfree(vaddr);
}
static const struct dma_map_ops vdpasim_dma_ops = {
@@ -269,7 +284,15 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
goto err_iommu;
for (i = 0; i < dev_attr->nvqs; i++)
- vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
+ &vdpasim->iommu_lock);
+
+ ret = iova_cache_get();
+ if (ret)
+ goto err_iommu;
+
+ /* For simplicity we use an IOVA allocator with byte granularity */
+ init_iova_domain(&vdpasim->iova, 1, 0);
vdpasim->vdpa.dma_dev = dev;
@@ -439,6 +462,13 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
spin_unlock(&vdpasim->lock);
}
+static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->dev_attr.config_size;
+}
+
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
void *buf, unsigned int len)
{
@@ -539,8 +569,17 @@ static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
static void vdpasim_free(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ int i;
cancel_work_sync(&vdpasim->work);
+
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
+ vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
+ vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
+ }
+
+ put_iova_domain(&vdpasim->iova);
+ iova_cache_put();
kvfree(vdpasim->buffer);
if (vdpasim->iommu)
vhost_iotlb_free(vdpasim->iommu);
@@ -566,6 +605,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
@@ -593,6 +633,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 6d75444f9948..cd58e888bcf3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -6,6 +6,7 @@
#ifndef _VDPA_SIM_H
#define _VDPA_SIM_H
+#include <linux/iova.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
@@ -57,6 +58,7 @@ struct vdpasim {
/* virtio config according to device type */
void *config;
struct vhost_iotlb *iommu;
+ struct iova_domain iova;
void *buffer;
u32 status;
u32 generation;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
new file mode 100644
index 000000000000..5bfe1c281645
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDPA simulator for block device.
+ *
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021, Red Hat Inc. All rights reserved.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <linux/blkdev.h>
+#include <uapi/linux/virtio_blk.h>
+
+#include "vdpa_sim.h"
+
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Max Gurtovoy <mgurtovoy@nvidia.com>"
+#define DRV_DESC "vDPA Device Simulator for block device"
+#define DRV_LICENSE "GPL v2"
+
+#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
+ (1ULL << VIRTIO_BLK_F_SEG_MAX) | \
+ (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
+ (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
+ (1ULL << VIRTIO_BLK_F_MQ))
+
+#define VDPASIM_BLK_CAPACITY 0x40000
+#define VDPASIM_BLK_SIZE_MAX 0x1000
+#define VDPASIM_BLK_SEG_MAX 32
+#define VDPASIM_BLK_VQ_NUM 1
+
+static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
+
+static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
+{
+ u64 range_sectors = range_size >> SECTOR_SHIFT;
+
+ if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
+ return false;
+
+ if (start_sector > VDPASIM_BLK_CAPACITY)
+ return false;
+
+ if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
+ return false;
+
+ return true;
+}
+
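vdpasim_blk_check_range() above is written to avoid integer wraparound: it first bounds start_sector, then compares the length against the remaining capacity rather than computing start + len. The same guard in isolation:

	#include <linux/types.h>

	/* 'len <= cap - start' cannot wrap once 'start <= cap' is known,
	 * unlike the naive 'start + len <= cap'. */
	static bool range_ok(u64 start, u64 len, u64 cap)
	{
		if (start > cap)
			return false;

		return len <= cap - start;
	}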
+/* Returns 'true' if the request is handled (with or without an I/O error)
+ * and the status is correctly written in the last byte of the 'in iov',
+ * 'false' otherwise.
+ */
+static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
+ struct vdpasim_virtqueue *vq)
+{
+ size_t pushed = 0, to_pull, to_push;
+ struct virtio_blk_outhdr hdr;
+ ssize_t bytes;
+ loff_t offset;
+ u64 sector;
+ u8 status;
+ u32 type;
+ int ret;
+
+ ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,
+ &vq->head, GFP_ATOMIC);
+ if (ret != 1)
+ return false;
+
+ if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
+ dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
+ vq->out_iov.used, vq->in_iov.used);
+ return false;
+ }
+
+ if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
+ dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
+ return false;
+ }
+
+ /* The last byte is the status and we checked if the last iov has
+ * enough room for it.
+ */
+ to_push = vringh_kiov_length(&vq->in_iov) - 1;
+
+ to_pull = vringh_kiov_length(&vq->out_iov);
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
+ sizeof(hdr));
+ if (bytes != sizeof(hdr)) {
+ dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
+ return false;
+ }
+
+ to_pull -= bytes;
+
+ type = vdpasim32_to_cpu(vdpasim, hdr.type);
+ sector = vdpasim64_to_cpu(vdpasim, hdr.sector);
+ offset = sector << SECTOR_SHIFT;
+ status = VIRTIO_BLK_S_OK;
+
+ switch (type) {
+ case VIRTIO_BLK_T_IN:
+ if (!vdpasim_blk_check_range(sector, to_push)) {
+ dev_err(&vdpasim->vdpa.dev,
+ "reading over the capacity - offset: 0x%llx len: 0x%zx\n",
+ offset, to_push);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
+ vdpasim->buffer + offset,
+ to_push);
+ if (bytes < 0) {
+ dev_err(&vdpasim->vdpa.dev,
+ "vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_push);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ pushed += bytes;
+ break;
+
+ case VIRTIO_BLK_T_OUT:
+ if (!vdpasim_blk_check_range(sector, to_pull)) {
+ dev_err(&vdpasim->vdpa.dev,
+ "writing over the capacity - offset: 0x%llx len: 0x%zx\n",
+ offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,
+ vdpasim->buffer + offset,
+ to_pull);
+ if (bytes < 0) {
+ dev_err(&vdpasim->vdpa.dev,
+ "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+ break;
+
+ case VIRTIO_BLK_T_GET_ID:
+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
+ vdpasim_blk_id,
+ VIRTIO_BLK_ID_BYTES);
+ if (bytes < 0) {
+ dev_err(&vdpasim->vdpa.dev,
+ "vringh_iov_push_iotlb() error: %zd\n", bytes);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ pushed += bytes;
+ break;
+
+ default:
+ dev_warn(&vdpasim->vdpa.dev,
+ "Unsupported request type %d\n", type);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ /* If some operations fail, we need to skip the remaining bytes
+ * to put the status in the last byte
+ */
+ if (to_push - pushed > 0)
+ vringh_kiov_advance(&vq->in_iov, to_push - pushed);
+
+ /* Last byte is the status */
+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
+ if (bytes != 1)
+ return false;
+
+ pushed += bytes;
+
+ /* Make sure data is written before advancing the index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&vq->vring, vq->head, pushed);
+
+ return true;
+}
+
+static void vdpasim_blk_work(struct work_struct *work)
+{
+ struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ int i;
+
+ spin_lock(&vdpasim->lock);
+
+ if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
+ for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+
+ if (!vq->ready)
+ continue;
+
+ while (vdpasim_blk_handle_req(vdpasim, vq)) {
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (vringh_need_notify_iotlb(&vq->vring) > 0)
+ vringh_notify(&vq->vring);
+ local_bh_enable();
+ }
+ }
+out:
+ spin_unlock(&vdpasim->lock);
+}
+
+static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
+{
+ struct virtio_blk_config *blk_config = config;
+
+ memset(config, 0, sizeof(struct virtio_blk_config));
+
+ blk_config->capacity = cpu_to_vdpasim64(vdpasim, VDPASIM_BLK_CAPACITY);
+ blk_config->size_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SIZE_MAX);
+ blk_config->seg_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SEG_MAX);
+ blk_config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM);
+ blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
+ blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
+ blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+}
+
+static void vdpasim_blk_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vdpasim_blk_mgmtdev = {
+ .init_name = "vdpasim_blk",
+ .release = vdpasim_blk_mgmtdev_release,
+};
+
+static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct vdpasim_dev_attr dev_attr = {};
+ struct vdpasim *simdev;
+ int ret;
+
+ dev_attr.mgmt_dev = mdev;
+ dev_attr.name = name;
+ dev_attr.id = VIRTIO_ID_BLOCK;
+ dev_attr.supported_features = VDPASIM_BLK_FEATURES;
+ dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
+ dev_attr.config_size = sizeof(struct virtio_blk_config);
+ dev_attr.get_config = vdpasim_blk_get_config;
+ dev_attr.work_fn = vdpasim_blk_work;
+ dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
+
+ simdev = vdpasim_create(&dev_attr);
+ if (IS_ERR(simdev))
+ return PTR_ERR(simdev);
+
+ ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ put_device(&simdev->vdpa.dev);
+ return ret;
+}
+
+static void vdpasim_blk_dev_del(struct vdpa_mgmt_dev *mdev,
+ struct vdpa_device *dev)
+{
+ struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);
+
+ _vdpa_unregister_device(&simdev->vdpa);
+}
+
+static const struct vdpa_mgmtdev_ops vdpasim_blk_mgmtdev_ops = {
+ .dev_add = vdpasim_blk_dev_add,
+ .dev_del = vdpasim_blk_dev_del
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+ .device = &vdpasim_blk_mgmtdev,
+ .id_table = id_table,
+ .ops = &vdpasim_blk_mgmtdev_ops,
+};
+
+static int __init vdpasim_blk_init(void)
+{
+ int ret;
+
+ ret = device_register(&vdpasim_blk_mgmtdev);
+ if (ret)
+ return ret;
+
+ ret = vdpa_mgmtdev_register(&mgmt_dev);
+ if (ret)
+ goto parent_err;
+
+ return 0;
+
+parent_err:
+ device_unregister(&vdpasim_blk_mgmtdev);
+ return ret;
+}
+
+static void __exit vdpasim_blk_exit(void)
+{
+ vdpa_mgmtdev_unregister(&mgmt_dev);
+ device_unregister(&vdpasim_blk_mgmtdev);
+}
+
+module_init(vdpasim_blk_init)
+module_exit(vdpasim_blk_exit)
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/vdpa/virtio_pci/Makefile b/drivers/vdpa/virtio_pci/Makefile
new file mode 100644
index 000000000000..231088d3af7d
--- /dev/null
+++ b/drivers/vdpa/virtio_pci/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VP_VDPA) += vp_vdpa.o
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
new file mode 100644
index 000000000000..c76ebb531212
--- /dev/null
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vDPA bridge driver for modern virtio-pci device
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ * Based on virtio_pci_modern.c.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_modern.h>
+
+#define VP_VDPA_QUEUE_MAX 256
+#define VP_VDPA_DRIVER_NAME "vp_vdpa"
+#define VP_VDPA_NAME_SIZE 256
+
+struct vp_vring {
+ void __iomem *notify;
+ char msix_name[VP_VDPA_NAME_SIZE];
+ struct vdpa_callback cb;
+ resource_size_t notify_pa;
+ int irq;
+};
+
+struct vp_vdpa {
+ struct vdpa_device vdpa;
+ struct virtio_pci_modern_device mdev;
+ struct vp_vring *vring;
+ struct vdpa_callback config_cb;
+ char msix_name[VP_VDPA_NAME_SIZE];
+ int config_irq;
+ int queues;
+ int vectors;
+};
+
+static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
+{
+ return container_of(vdpa, struct vp_vdpa, vdpa);
+}
+
+static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ return &vp_vdpa->mdev;
+}
+
+static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_features(mdev);
+}
+
+static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_features(mdev, features);
+
+ return 0;
+}
+
+static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_status(mdev);
+}
+
+static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
+{
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
+ int i;
+
+ for (i = 0; i < vp_vdpa->queues; i++) {
+ if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
+ &vp_vdpa->vring[i]);
+ vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ }
+ }
+
+ if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
+ vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+ }
+
+ if (vp_vdpa->vectors) {
+ pci_free_irq_vectors(pdev);
+ vp_vdpa->vectors = 0;
+ }
+}
+
+static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
+{
+ struct vp_vring *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
+{
+ struct vp_vdpa *vp_vdpa = arg;
+
+ if (vp_vdpa->config_cb.callback)
+ return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
+{
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
+ int i, ret, irq;
+ int queues = vp_vdpa->queues;
+ int vectors = queues + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
+ if (ret != vectors) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to allocate irq vectors want %d but %d\n",
+ vectors, ret);
+ return ret;
+ }
+
+ vp_vdpa->vectors = vectors;
+
+ for (i = 0; i < queues; i++) {
+ snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
+ "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
+ irq = pci_irq_vector(pdev, i);
+ ret = devm_request_irq(&pdev->dev, irq,
+ vp_vdpa_vq_handler,
+ 0, vp_vdpa->vring[i].msix_name,
+ &vp_vdpa->vring[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to request irq for vq %d\n", i);
+ goto err;
+ }
+ vp_modern_queue_vector(mdev, i, i);
+ vp_vdpa->vring[i].irq = irq;
+ }
+
+ snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config",
+ pci_name(pdev));
+ irq = pci_irq_vector(pdev, queues);
+ ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
+ vp_vdpa->msix_name, vp_vdpa);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to request irq for vq %d\n", i);
+ goto err;
+ }
+ vp_modern_config_vector(mdev, queues);
+ vp_vdpa->config_irq = irq;
+
+ return 0;
+err:
+ vp_vdpa_free_irq(vp_vdpa);
+ return ret;
+}
+
+static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 s = vp_vdpa_get_status(vdpa);
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
+ !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ vp_vdpa_request_irq(vp_vdpa);
+ }
+
+ vp_modern_set_status(mdev, status);
+
+ if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ (s & VIRTIO_CONFIG_S_DRIVER_OK))
+ vp_vdpa_free_irq(vp_vdpa);
+}
+
+static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ return VP_VDPA_QUEUE_MAX;
+}
+
+static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_vq_state *state)
+{
+ /* Note that this is not supported by the virtio specification, so
+ * we return -EOPNOTSUPP here. This means we can't support live
+ * migration, vhost device start/stop.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
+ const struct vdpa_vq_state *state)
+{
+ /* Note that this is not supported by the virtio specification, so
+ * we return -EOPNOTSUPP here. This means we can't support live
+ * migration, vhost device start/stop.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_vdpa->vring[qid].cb = *cb;
+}
+
+static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
+ u16 qid, bool ready)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_queue_enable(mdev, qid, ready);
+}
+
+static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_queue_enable(mdev, qid);
+}
+
+static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
+ u32 num)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_queue_size(mdev, qid, num);
+}
+
+static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_queue_address(mdev, qid, desc_area,
+ driver_area, device_area);
+
+ return 0;
+}
+
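+/* Kick the device by writing the queue index to its notify area. */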
+static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
+}
+
+static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_generation(mdev);
+}
+
+static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->id.device;
+}
+
+static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->id.vendor;
+}
+
+static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ return PAGE_SIZE;
+}
+
+static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->device_len;
+}
+
+static void vp_vdpa_get_config(struct vdpa_device *vdpa,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 old, new;
+ u8 *p;
+ int i;
+
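+	/*
+	 * Re-read if the generation counter changed under us, so the
+	 * caller never sees a torn config update.
+	 */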
+ do {
+ old = vp_ioread8(&mdev->common->config_generation);
+ p = buf;
+ for (i = 0; i < len; i++)
+ *p++ = vp_ioread8(mdev->device + offset + i);
+
+ new = vp_ioread8(&mdev->common->config_generation);
+ } while (old != new);
+}
+
+static void vp_vdpa_set_config(struct vdpa_device *vdpa,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ const u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ vp_iowrite8(*p++, mdev->device + offset + i);
+}
+
+static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_vdpa->config_cb = *cb;
+}
+
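+/*
+ * Report the physical doorbell area of a virtqueue so that callers
+ * such as vhost-vdpa can map it directly into userspace.
+ */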
+static struct vdpa_notification_area
+vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct vdpa_notification_area notify;
+
+ notify.addr = vp_vdpa->vring[qid].notify_pa;
+ notify.size = mdev->notify_offset_multiplier;
+
+ return notify;
+}
+
+static const struct vdpa_config_ops vp_vdpa_ops = {
+ .get_features = vp_vdpa_get_features,
+ .set_features = vp_vdpa_set_features,
+ .get_status = vp_vdpa_get_status,
+ .set_status = vp_vdpa_set_status,
+ .get_vq_num_max = vp_vdpa_get_vq_num_max,
+ .get_vq_state = vp_vdpa_get_vq_state,
+ .get_vq_notification = vp_vdpa_get_vq_notification,
+ .set_vq_state = vp_vdpa_set_vq_state,
+ .set_vq_cb = vp_vdpa_set_vq_cb,
+ .set_vq_ready = vp_vdpa_set_vq_ready,
+ .get_vq_ready = vp_vdpa_get_vq_ready,
+ .set_vq_num = vp_vdpa_set_vq_num,
+ .set_vq_address = vp_vdpa_set_vq_address,
+ .kick_vq = vp_vdpa_kick_vq,
+ .get_generation = vp_vdpa_get_generation,
+ .get_device_id = vp_vdpa_get_device_id,
+ .get_vendor_id = vp_vdpa_get_vendor_id,
+ .get_vq_align = vp_vdpa_get_vq_align,
+ .get_config_size = vp_vdpa_get_config_size,
+ .get_config = vp_vdpa_get_config,
+ .set_config = vp_vdpa_set_config,
+ .set_config_cb = vp_vdpa_set_config_cb,
+};
+
+static void vp_vdpa_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct virtio_pci_modern_device *mdev;
+ struct device *dev = &pdev->dev;
+ struct vp_vdpa *vp_vdpa;
+ int ret, i;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
+ dev, &vp_vdpa_ops, NULL);
+	if (IS_ERR(vp_vdpa)) {
+		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
+		return PTR_ERR(vp_vdpa);
+	}
+
+ mdev = &vp_vdpa->mdev;
+ mdev->pci_dev = pdev;
+
+ ret = vp_modern_probe(mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vp_vdpa);
+
+ vp_vdpa->vdpa.dma_dev = &pdev->dev;
+ vp_vdpa->queues = vp_modern_get_num_queues(mdev);
+
+ ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed for adding devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
+ sizeof(*vp_vdpa->vring),
+ GFP_KERNEL);
+ if (!vp_vdpa->vring) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
+ goto err;
+ }
+
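+	/*
+	 * Map each vq's notify area up front; irqs are only requested
+	 * from vp_vdpa_set_status() once the driver sets DRIVER_OK.
+	 */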
+ for (i = 0; i < vp_vdpa->queues; i++) {
+ vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ vp_vdpa->vring[i].notify =
+ vp_modern_map_vq_notify(mdev, i,
+ &vp_vdpa->vring[i].notify_pa);
+ if (!vp_vdpa->vring[i].notify) {
+ dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
+ goto err;
+ }
+ }
+ vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+
+ ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ put_device(&vp_vdpa->vdpa.dev);
+ return ret;
+}
+
+static void vp_vdpa_remove(struct pci_dev *pdev)
+{
+ struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
+
+ vdpa_unregister_device(&vp_vdpa->vdpa);
+ vp_modern_remove(&vp_vdpa->mdev);
+}
+
+static struct pci_driver vp_vdpa_driver = {
+ .name = "vp-vdpa",
+ .id_table = NULL, /* only dynamic ids */
+ .probe = vp_vdpa_probe,
+ .remove = vp_vdpa_remove,
+};
+
+module_pci_driver(vp_vdpa_driver);
+
+MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
+MODULE_DESCRIPTION("vp-vdpa");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 07296326d24d..a0747c35a778 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2248,7 +2248,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
- struct iommu_domain_geometry geo;
+ struct iommu_domain_geometry *geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
@@ -2316,10 +2316,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
if (iommu->nesting) {
- int attr = 1;
-
- ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
- &attr);
+ ret = iommu_enable_nesting(domain->domain);
if (ret)
goto out_domain;
}
@@ -2329,10 +2326,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_domain;
/* Get aperture info */
- iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo);
-
- if (vfio_iommu_aper_conflict(iommu, geo.aperture_start,
- geo.aperture_end)) {
+ geo = &domain->domain->geometry;
+ if (vfio_iommu_aper_conflict(iommu, geo->aperture_start,
+ geo->aperture_end)) {
ret = -EINVAL;
goto out_detach;
}
@@ -2355,8 +2351,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_detach;
- ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start,
- geo.aperture_end);
+ ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start,
+ geo->aperture_end);
if (ret)
goto out_detach;
@@ -2489,7 +2485,6 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
struct list_head *iova_copy)
{
struct vfio_domain *domain;
- struct iommu_domain_geometry geo;
struct vfio_iova *node;
dma_addr_t start = 0;
dma_addr_t end = (dma_addr_t)~0;
@@ -2498,12 +2493,12 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
return;
list_for_each_entry(domain, &iommu->domain_list, next) {
- iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,
- &geo);
- if (geo.aperture_start > start)
- start = geo.aperture_start;
- if (geo.aperture_end < end)
- end = geo.aperture_end;
+ struct iommu_domain_geometry *geo = &domain->domain->geometry;
+
+ if (geo->aperture_start > start)
+ start = geo->aperture_start;
+ if (geo->aperture_end < end)
+ end = geo->aperture_end;
}
/* Modify aperture limits. The new aper is either same or bigger */
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index bfa4c6ef554e..fb41db3da611 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -16,12 +16,12 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
-#include <linux/virtio_net.h>
#include "vhost.h"
@@ -188,13 +188,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vhost_vdpa_config *c)
{
- long size = 0;
-
- switch (v->virtio_id) {
- case VIRTIO_ID_NET:
- size = sizeof(struct virtio_net_config);
- break;
- }
+ struct vdpa_device *vdpa = v->vdpa;
+ long size = vdpa->config->get_config_size(vdpa);
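+	/*
+	 * The config size now comes from the parent device, so any vDPA
+	 * device type can be validated here, not just virtio-net.
+	 */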
if (c->len == 0)
return -EINVAL;
@@ -836,18 +831,14 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
struct vdpa_iova_range *range = &v->range;
- struct iommu_domain_geometry geo;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (ops->get_iova_range) {
*range = ops->get_iova_range(vdpa);
- } else if (v->domain &&
- !iommu_domain_get_attr(v->domain,
- DOMAIN_ATTR_GEOMETRY, &geo) &&
- geo.force_aperture) {
- range->first = geo.aperture_start;
- range->last = geo.aperture_end;
+ } else if (v->domain && v->domain->geometry.force_aperture) {
+ range->first = v->domain->geometry.aperture_start;
+ range->last = v->domain->geometry.aperture_end;
} else {
range->first = 0;
range->last = ULLONG_MAX;
@@ -993,6 +984,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
@@ -1027,10 +1019,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
int minor;
int r;
- /* Currently, we only accept the network devices. */
- if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
- return -ENOTSUPP;
-
v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!v)
return -ENOMEM;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 85d85faba058..4af8fa259d65 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -75,6 +75,34 @@ static inline int __vringh_get_head(const struct vringh *vrh,
return head;
}
+/**
+ * vringh_kiov_advance - skip bytes from a vringh_kiov
+ * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
+ * @len: the maximum length to advance
+ */
+void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
+{
+ while (len && iov->i < iov->used) {
+ size_t partlen = min(iov->iov[iov->i].iov_len, len);
+
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
+
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+
+ len -= partlen;
+ }
+}
+EXPORT_SYMBOL(vringh_kiov_advance);
+
/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
struct vringh_kiov *iov,
@@ -95,19 +123,8 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
done += partlen;
len -= partlen;
ptr += partlen;
- iov->consumed += partlen;
- iov->iov[iov->i].iov_len -= partlen;
- iov->iov[iov->i].iov_base += partlen;
- if (!iov->iov[iov->i].iov_len) {
- /* Fix up old iov element then increment. */
- iov->iov[iov->i].iov_len = iov->consumed;
- iov->iov[iov->i].iov_base -= iov->consumed;
-
-
- iov->consumed = 0;
- iov->i++;
- }
+ vringh_kiov_advance(iov, partlen);
}
return done;
}
@@ -290,9 +307,9 @@ __vringh_iov(struct vringh *vrh, u16 i,
return -EINVAL;
if (riov)
- riov->i = riov->used = 0;
+ riov->i = riov->used = riov->consumed = 0;
if (wiov)
- wiov->i = wiov->used = 0;
+ wiov->i = wiov->used = wiov->consumed = 0;
for (;;) {
void *addr;
@@ -662,7 +679,10 @@ EXPORT_SYMBOL(vringh_init_user);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When riov and wiov are no longer needed, clean them up by calling
+ * vringh_iov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_user(struct vringh *vrh,
struct vringh_iov *riov,
@@ -932,7 +952,10 @@ EXPORT_SYMBOL(vringh_init_kern);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When riov and wiov are no longer needed, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
@@ -1074,6 +1097,8 @@ static int iotlb_translate(const struct vringh *vrh,
int ret = 0;
u64 s = 0;
+ spin_lock(vrh->iotlb_lock);
+
while (len > s) {
u64 size, pa, pfn;
@@ -1103,6 +1128,8 @@ static int iotlb_translate(const struct vringh *vrh,
++ret;
}
+ spin_unlock(vrh->iotlb_lock);
+
return ret;
}
@@ -1262,10 +1289,13 @@ EXPORT_SYMBOL(vringh_init_iotlb);
* vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
* @vrh: the vring
* @iotlb: iotlb associated with this vring
+ * @iotlb_lock: spinlock to synchronize the iotlb accesses
*/
-void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+ spinlock_t *iotlb_lock)
{
vrh->iotlb = iotlb;
+ vrh->iotlb_lock = iotlb_lock;
}
EXPORT_SYMBOL(vringh_set_iotlb);
@@ -1285,7 +1315,10 @@ EXPORT_SYMBOL(vringh_set_iotlb);
* *head will be vrh->vring.num. You may be able to ignore an invalid
* descriptor, but there's not much you can do with an invalid ring.
*
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When riov and wiov are no longer needed, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
*/
int vringh_getdesc_iotlb(struct vringh *vrh,
struct vringh_kiov *riov,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index f58a545b3bf3..8ea8f079cde2 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -575,7 +575,8 @@ static int efifb_probe(struct platform_device *dev)
goto err_fb_dealoc;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
- pm_runtime_get_sync(&efifb_pci_dev->dev);
+ if (efifb_pci_dev)
+ pm_runtime_get_sync(&efifb_pci_dev->dev);
return 0;
err_fb_dealoc:
@@ -602,7 +603,8 @@ static int efifb_remove(struct platform_device *pdev)
unregister_framebuffer(info);
sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
framebuffer_release(info);
- pm_runtime_put(&efifb_pci_dev->dev);
+ if (efifb_pci_dev)
+ pm_runtime_put(&efifb_pci_dev->dev);
return 0;
}
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index f1964ea4b826..e21e1e86ad15 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
* enclave file descriptor to be further used for enclave
* resources handling e.g. memory regions and CPUs.
* @ne_pci_dev : Private data associated with the PCI device.
- * @slot_uid: Generated unique slot id associated with an enclave.
+ * @slot_uid: User pointer where the generated unique slot id
+ *	      associated with an enclave is stored.
*
* Context: Process context. This function is called with the ne_pci_dev enclave
* mutex held.
@@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
* * Enclave fd on success.
* * Negative return value on failure.
*/
-static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
+static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
int enclave_fd = -1;
@@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
- *slot_uid = ne_enclave->slot_uid;
+ if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
+ /*
+ * As we're holding the only reference to 'enclave_file', fput()
+ * will call ne_enclave_release() which will do a proper cleanup
+ * of all so far allocated resources, leaving only the unused fd
+ * for us to free.
+ */
+ fput(enclave_file);
+ put_unused_fd(enclave_fd);
+
+ return -EFAULT;
+ }
fd_install(enclave_fd, enclave_file);
@@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case NE_CREATE_VM: {
int enclave_fd = -1;
- struct file *enclave_file = NULL;
struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
- int rc = -EINVAL;
- u64 slot_uid = 0;
+ u64 __user *slot_uid = (void __user *)arg;
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
-
- enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
- if (enclave_fd < 0) {
- rc = enclave_fd;
-
- mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
-
- return rc;
- }
-
+ enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
- if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
- enclave_file = fget(enclave_fd);
- /* Decrement file refs to have release() called. */
- fput(enclave_file);
- fput(enclave_file);
- put_unused_fd(enclave_fd);
-
- return -EFAULT;
- }
-
return enclave_fd;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8985fc2cea86..510e9318854d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -734,7 +734,7 @@ static void report_free_page_func(struct work_struct *work)
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
- * a compation thread. (called under page lock)
+ * a compaction thread. (called under page lock)
* @vb_dev_info: the balloon device
* @newpage: page that will replace the isolated page after migration finishes.
* @page : the isolated (old) page that is about to be migrated to newpage.
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index fbd4ebc00eb6..30654d3a0b41 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -192,7 +192,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct virtqueue *vq;
- u16 num, off;
+ u16 num;
int err;
if (index >= vp_modern_get_num_queues(mdev))
@@ -208,9 +208,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return ERR_PTR(-EINVAL);
}
- /* get offset of notification word for this vq */
- off = vp_modern_get_queue_notify_off(mdev, index);
-
info->msix_vector = msix_vec;
/* create the vring */
@@ -227,27 +224,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
virtqueue_get_avail_addr(vq),
virtqueue_get_used_addr(vq));
- if (mdev->notify_base) {
- /* offset should not wrap */
- if ((u64)off * mdev->notify_offset_multiplier + 2
- > mdev->notify_len) {
- dev_warn(&mdev->pci_dev->dev,
- "bad notification offset %u (x %u) "
- "for queue %u > %zd",
- off, mdev->notify_offset_multiplier,
- index, mdev->notify_len);
- err = -EINVAL;
- goto err_map_notify;
- }
- vq->priv = (void __force *)mdev->notify_base +
- off * mdev->notify_offset_multiplier;
- } else {
- vq->priv = (void __force *)vp_modern_map_capability(mdev,
- mdev->notify_map_cap, 2, 2,
- off * mdev->notify_offset_multiplier, 2,
- NULL);
- }
-
+ vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
goto err_map_notify;
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index cbd667496bb1..54f297028586 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -13,14 +13,14 @@
* @start: start from the capability
* @size: map size
* @len: the length that is actually mapped
+ * @pa: physical address of the capability
*
 * Returns the io address for the part of the capability
*/
-void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
- size_t minlen,
- u32 align,
- u32 start, u32 size,
- size_t *len)
+static void __iomem *
+vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen, u32 align, u32 start, u32 size,
+ size_t *len, resource_size_t *pa)
{
struct pci_dev *dev = mdev->pci_dev;
u8 bar;
@@ -88,9 +88,11 @@ void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, in
dev_err(&dev->dev,
"virtio_pci: unable to map virtio %u@%u on bar %i\n",
length, offset, bar);
+ else if (pa)
+ *pa = pci_resource_start(dev, bar) + offset;
+
return p;
}
-EXPORT_SYMBOL_GPL(vp_modern_map_capability);
/**
* virtio_pci_find_capability - walk capabilities to find device info.
@@ -275,12 +277,12 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
0, sizeof(struct virtio_pci_common_cfg),
- NULL);
+ NULL, NULL);
if (!mdev->common)
goto err_map_common;
mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
0, 1,
- NULL);
+ NULL, NULL);
if (!mdev->isr)
goto err_map_isr;
@@ -308,7 +310,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->notify_base = vp_modern_map_capability(mdev, notify,
2, 2,
0, notify_length,
- &mdev->notify_len);
+ &mdev->notify_len,
+ &mdev->notify_pa);
if (!mdev->notify_base)
goto err_map_notify;
} else {
@@ -321,7 +324,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
if (device) {
mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
0, PAGE_SIZE,
- &mdev->device_len);
+ &mdev->device_len,
+ NULL);
if (!mdev->device)
goto err_map_device;
}
@@ -584,14 +588,51 @@ EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
*
* Returns the notification offset for a virtqueue
*/
-u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
- u16 index)
+static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 index)
{
vp_iowrite16(index, &mdev->common->queue_select);
return vp_ioread16(&mdev->common->queue_notify_off);
}
-EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
+
+/*
+ * vp_modern_map_vq_notify - map notification area for a
+ * specific virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @pa: the pointer to the physical address of the notify area
+ *
+ * Returns the address of the notification area
+ */
+void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
+ u16 index, resource_size_t *pa)
+{
+ u16 off = vp_modern_get_queue_notify_off(mdev, index);
+
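+	/*
+	 * Prefer the pre-mapped notify window when it exists; otherwise
+	 * map just this queue's doorbell from the notify capability.
+	 */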
+ if (mdev->notify_base) {
+ /* offset should not wrap */
+ if ((u64)off * mdev->notify_offset_multiplier + 2
+ > mdev->notify_len) {
+ dev_warn(&mdev->pci_dev->dev,
+ "bad notification offset %u (x %u) "
+ "for queue %u > %zd",
+ off, mdev->notify_offset_multiplier,
+ index, mdev->notify_len);
+ return NULL;
+ }
+ if (pa)
+ *pa = mdev->notify_pa +
+ off * mdev->notify_offset_multiplier;
+ return mdev->notify_base + off * mdev->notify_offset_multiplier;
+ } else {
+ return vp_modern_map_capability(mdev,
+ mdev->notify_map_cap, 2, 2,
+ off * mdev->notify_offset_multiplier, 2,
+ NULL, pa);
+ }
+}
+EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99..4c89afc0df62 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -40,14 +40,7 @@
#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static char *xen_io_tlb_start, *xen_io_tlb_end;
-static unsigned long xen_io_tlb_nslabs;
/*
* Quick lookup value of the bus address of the IOTLB.
*/
@@ -82,11 +75,6 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
-static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
-{
- return xen_phys_to_dma(dev, virt_to_phys(address));
-}
-
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
@@ -111,15 +99,12 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
* have the same virtual address as another address
* in our domain. Therefore _only_ check address within our domain.
*/
- if (pfn_valid(PFN_DOWN(paddr))) {
- return paddr >= virt_to_phys(xen_io_tlb_start) &&
- paddr < virt_to_phys(xen_io_tlb_end);
- }
+ if (pfn_valid(PFN_DOWN(paddr)))
+ return is_swiotlb_buffer(paddr);
return 0;
}
-static int
-xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
+static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int i, rc;
int dma_bits;
@@ -145,16 +130,6 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
} while (i < nslabs);
return 0;
}
-static unsigned long xen_set_nslabs(unsigned long nr_tbl)
-{
- if (!nr_tbl) {
- xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
- xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
- } else
- xen_io_tlb_nslabs = nr_tbl;
-
- return xen_io_tlb_nslabs << IO_TLB_SHIFT;
-}
enum xen_swiotlb_err {
XEN_SWIOTLB_UNKNOWN = 0,
@@ -177,102 +152,109 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
}
return "";
}
-int __ref xen_swiotlb_init(int verbose, bool early)
+
+#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
+
+int __ref xen_swiotlb_init(void)
{
- unsigned long bytes, order;
- int rc = -ENOMEM;
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
- unsigned int repeat = 3;
+ unsigned long bytes = swiotlb_size_or_default();
+ unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+ unsigned int order, repeat = 3;
+ int rc = -ENOMEM;
+ char *start;
- xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
- bytes = xen_set_nslabs(xen_io_tlb_nslabs);
- order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
-
- /*
- * IO TLB memory already allocated. Just use it.
- */
- if (io_tlb_start != 0) {
- xen_io_tlb_start = phys_to_virt(io_tlb_start);
- goto end;
- }
+ m_ret = XEN_SWIOTLB_ENOMEM;
+ order = get_order(bytes);
/*
* Get IO TLB memory from any location.
*/
- if (early) {
- xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
- PAGE_SIZE);
- if (!xen_io_tlb_start)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
- } else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
- while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
- if (xen_io_tlb_start)
- break;
- order--;
- }
- if (order != get_order(bytes)) {
- pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
- (PAGE_SIZE << order) >> 20);
- xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
- bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
- }
+ while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+ start = (void *)xen_get_swiotlb_free_pages(order);
+ if (start)
+ break;
+ order--;
}
- if (!xen_io_tlb_start) {
- m_ret = XEN_SWIOTLB_ENOMEM;
+ if (!start)
goto error;
+ if (order != get_order(bytes)) {
+ pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
+ (PAGE_SIZE << order) >> 20);
+ nslabs = SLABS_PER_PAGE << order;
+ bytes = nslabs << IO_TLB_SHIFT;
}
+
/*
* And replace that memory with pages under 4GB.
*/
- rc = xen_swiotlb_fixup(xen_io_tlb_start,
- bytes,
- xen_io_tlb_nslabs);
+ rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
- if (early)
- memblock_free(__pa(xen_io_tlb_start),
- PAGE_ALIGN(bytes));
- else {
- free_pages((unsigned long)xen_io_tlb_start, order);
- xen_io_tlb_start = NULL;
- }
+ free_pages((unsigned long)start, order);
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
- if (early) {
- if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
- verbose))
- panic("Cannot allocate SWIOTLB buffer");
- rc = 0;
- } else
- rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
-
-end:
- xen_io_tlb_end = xen_io_tlb_start + bytes;
- if (!rc)
- swiotlb_set_max_segment(PAGE_SIZE);
-
- return rc;
+ rc = swiotlb_late_init_with_tbl(start, nslabs);
+ if (rc)
+ return rc;
+ swiotlb_set_max_segment(PAGE_SIZE);
+ return 0;
error:
if (repeat--) {
- xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
- (xen_io_tlb_nslabs >> 1));
+ /* Min is 2MB */
+ nslabs = max(1024UL, (nslabs >> 1));
pr_info("Lowering to %luMB\n",
- (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+ (nslabs << IO_TLB_SHIFT) >> 20);
goto retry;
}
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- if (early)
- panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
- else
- free_pages((unsigned long)xen_io_tlb_start, order);
+ free_pages((unsigned long)start, order);
return rc;
}
+#ifdef CONFIG_X86
+void __init xen_swiotlb_init_early(void)
+{
+ unsigned long bytes = swiotlb_size_or_default();
+ unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+ unsigned int repeat = 3;
+ char *start;
+ int rc;
+
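+	/*
+	 * Boot-time variant: allocation failures are fatal here, while a
+	 * fixup failure halves the buffer (2MB floor) and retries.
+	 */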
+retry:
+ /*
+ * Get IO TLB memory from any location.
+ */
+ start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+ if (!start)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+
+ /*
+ * And replace that memory with pages under 4GB.
+ */
+ rc = xen_swiotlb_fixup(start, nslabs);
+ if (rc) {
+ memblock_free(__pa(start), PAGE_ALIGN(bytes));
+ if (repeat--) {
+ /* Min is 2MB */
+ nslabs = max(1024UL, (nslabs >> 1));
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
+ goto retry;
+ }
+ panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
+ }
+
+ if (swiotlb_init_with_tbl(start, nslabs, false))
+ panic("Cannot allocate SWIOTLB buffer");
+ swiotlb_set_max_segment(PAGE_SIZE);
+}
+#endif /* CONFIG_X86 */
+
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
@@ -406,7 +388,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* Ensure that the address returned is DMA'ble
*/
if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
- swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+ swiotlb_tbl_unmap_single(dev, map, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
@@ -445,7 +427,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(hwdev, dev_addr))
- swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}
static void
@@ -462,7 +444,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
}
if (is_xen_swiotlb_buffer(dev, dma_addr))
- swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+ swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}
static void
@@ -472,7 +454,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
if (is_xen_swiotlb_buffer(dev, dma_addr))
- swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+ swiotlb_sync_single_for_device(dev, paddr, size, dir);
if (!dev_is_dma_coherent(dev)) {
if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
@@ -560,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
+ return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {