Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_apd.c4
-rw-r--r--drivers/acpi/acpi_lpss.c14
-rw-r--r--drivers/acpi/acpi_watchdog.c7
-rw-r--r--drivers/acpi/ec.c39
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/sleep.c6
-rw-r--r--drivers/acpi/spcr.c36
-rw-r--r--drivers/ata/Kconfig4
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-eh.c7
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/sata_rcar.c8
-rw-r--r--drivers/base/dma-coherent.c164
-rw-r--r--drivers/base/dma-mapping.c2
-rw-r--r--drivers/base/firmware_class.c49
-rw-r--r--drivers/block/nbd.c18
-rw-r--r--drivers/block/sunvdc.c61
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkfront.c25
-rw-r--r--drivers/block/zram/zram_drv.c4
-rw-r--r--drivers/bus/uniphier-system-bus.c14
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/clk/clk-gemini.c14
-rw-r--r--drivers/clk/keystone/sci-clk.c66
-rw-r--r--drivers/clk/meson/clk-mpll.c7
-rw-r--r--drivers/clk/meson/clkc.h1
-rw-r--r--drivers/clk/meson/gxbb.c5
-rw-r--r--drivers/clk/meson/meson8b.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c7
-rw-r--r--drivers/cpufreq/intel_pstate.c8
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c10
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/bcm/spu2.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c3
-rw-r--r--drivers/crypto/inside-secure/safexcel.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c8
-rw-r--r--drivers/dax/super.c6
-rw-r--r--drivers/dma-buf/reservation.c99
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-lp87565.c46
-rw-r--r--drivers/gpio/gpio-mxc.c3
-rw-r--r--drivers/gpio/gpio-tegra.c6
-rw-r--r--drivers/gpio/gpiolib.c9
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c189
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c164
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c281
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c241
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c183
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c102
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c123
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c316
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c33
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c63
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c294
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h330
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h140
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c25
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c71
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c46
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c113
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c25
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c40
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h7
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c10
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c2
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c10
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c5
-rw-r--r--drivers/gpu/drm/drm_drv.c40
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c180
-rw-r--r--drivers/gpu/drm/drm_file.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c37
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c283
-rw-r--r--drivers/gpu/drm/drm_internal.h6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c10
-rw-r--r--drivers/gpu/drm/drm_plane.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c529
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c45
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c8
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c124
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c228
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c30
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c68
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c58
-rw-r--r--drivers/gpu/drm/gma500/gem.c30
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c8
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c67
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c14
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c12
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h6
-rw-r--r--drivers/gpu/drm/i915/intel_color.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c110
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c4
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c20
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c53
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c232
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c64
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c108
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c63
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c27
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c66
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c63
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c59
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c37
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h12
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c45
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c57
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c58
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c85
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h2
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c12
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c36
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c71
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c6
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c104
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c81
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c190
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c824
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c88
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c329
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c406
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss_features.c905
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss_features.h109
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h16
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c38
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c24
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h25
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c29
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c86
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c37
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c117
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c3
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c4
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig19
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c4
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig16
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c156
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c220
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c10
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c12
-rw-r--r--drivers/gpu/drm/tegra/drm.c104
-rw-r--r--drivers/gpu/drm/tegra/drm.h12
-rw-r--r--drivers/gpu/drm/tegra/dsi.c14
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/gem.c78
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c12
-rw-r--r--drivers/gpu/drm/tegra/sor.c12
-rw-r--r--drivers/gpu/drm/tegra/trace.c2
-rw-r--r--drivers/gpu/drm/tegra/trace.h68
-rw-r--r--drivers/gpu/drm/tegra/vic.c15
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig10
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c42
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c2
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c28
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c428
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c64
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c5
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c31
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c61
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c78
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c72
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c4
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c252
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c156
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c114
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c111
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c4
-rw-r--r--drivers/gpu/host1x/bus.c18
-rw-r--r--drivers/gpu/host1x/dev.c12
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c24
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c2
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-ortek.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c16
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c6
-rw-r--r--drivers/i2c/i2c-core-acpi.c19
-rw-r--r--drivers/i2c/i2c-core-base.c1
-rw-r--r--drivers/i2c/i2c-core.h9
-rw-r--r--drivers/i2c/muxes/Kconfig2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c9
-rw-r--r--drivers/iio/accel/st_accel_core.c32
-rw-r--r--drivers/iio/adc/aspeed_adc.c26
-rw-r--r--drivers/iio/adc/axp288_adc.c42
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c29
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c2
-rw-r--r--drivers/infiniband/core/addr.c62
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c3
-rw-r--r--drivers/infiniband/core/verbs.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c2
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c33
-rw-r--r--drivers/iommu/amd_iommu.c1
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/arm-smmu.c23
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/io-pgtable.h9
-rw-r--r--drivers/iommu/mtk_iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.h1
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c28
-rw-r--r--drivers/isdn/i4l/isdn_common.c1
-rw-r--r--drivers/isdn/i4l/isdn_net.c5
-rw-r--r--drivers/lightnvm/pblk-rb.c4
-rw-r--r--drivers/lightnvm/pblk-read.c23
-rw-r--r--drivers/lightnvm/pblk.h2
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/md/dm-bufio.c3
-rw-r--r--drivers/md/dm-integrity.c22
-rw-r--r--drivers/md/dm-raid.c29
-rw-r--r--drivers/md/dm-table.c35
-rw-r--r--drivers/md/dm-verity-fec.c21
-rw-r--r--drivers/md/dm-zoned-metadata.c12
-rw-r--r--drivers/md/dm-zoned-reclaim.c2
-rw-r--r--drivers/md/dm-zoned-target.c8
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/md.h54
-rw-r--r--drivers/md/raid1-10.c81
-rw-r--r--drivers/md/raid1.c68
-rw-r--r--drivers/md/raid10.c25
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/media/cec/cec-adap.c2
-rw-r--r--drivers/media/cec/cec-notifier.c6
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c143
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.h7
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c5
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h15
-rw-r--r--drivers/media/dvb-frontends/lnbh25.c6
-rw-r--r--drivers/media/dvb-frontends/stv0367.c210
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c1
-rw-r--r--drivers/media/i2c/tvp5150.c25
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c102
-rw-r--r--drivers/media/pci/ngene/ngene-core.c32
-rw-r--r--drivers/media/pci/ngene/ngene-i2c.c6
-rw-r--r--drivers/media/pci/ngene/ngene.h6
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c1
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/coda/coda-bit.c8
-rw-r--r--drivers/media/platform/coda/coda-common.c4
-rw-r--r--drivers/media/platform/coda/coda.h2
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h10
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c92
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c151
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c93
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c133
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h6
-rw-r--r--drivers/media/platform/qcom/venus/core.c18
-rw-r--r--drivers/media/platform/qcom/venus/core.h1
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c76
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h5
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c11
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c14
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c15
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c15
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c15
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c15
-rw-r--r--drivers/media/radio/radio-wl1273.c15
-rw-r--r--drivers/media/rc/ir-lirc-codec.c2
-rw-r--r--drivers/media/tuners/fc0011.c1
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c10
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c38
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c18
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c2
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c18
-rw-r--r--drivers/media/usb/stkwebcam/stk-sensor.c32
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c70
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.h6
-rw-r--r--drivers/media/v4l2-core/tuner-core.c2
-rw-r--r--drivers/misc/mei/pci-me.c6
-rw-r--r--drivers/misc/mei/pci-txe.c6
-rw-r--r--drivers/mmc/core/block.c3
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c13
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c35
-rw-r--r--drivers/mmc/host/sunxi-mmc.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c2
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c21
-rw-r--r--drivers/mtd/nand/nand_base.c13
-rw-r--r--drivers/mtd/nand/nand_timings.c6
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/dsa/mt7530.c38
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c6
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c9
-rw-r--r--drivers/net/ethernet/broadcom/b44.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c7
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c27
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c5
-rw-r--r--drivers/net/ethernet/sun/sunhme.h6
-rw-r--r--drivers/net/ethernet/ti/cpts.c111
-rw-r--r--drivers/net/ethernet/ti/cpts.h2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c43
-rw-r--r--drivers/net/hyperv/rndis_filter.c14
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/irda/mcs7780.c16
-rw-r--r--drivers/net/phy/Kconfig13
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c18
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c53
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/lan78xx.c18
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vxlan.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/nvme/host/core.c41
-rw-r--r--drivers/nvme/host/fc.c121
-rw-r--r--drivers/nvme/host/pci.c24
-rw-r--r--drivers/nvme/target/fc.c313
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/property.c17
-rw-r--r--drivers/parisc/pdc_stable.c8
-rw-r--r--drivers/pci/pci.c35
-rw-r--r--drivers/perf/arm_pmu.c41
-rw-r--r--drivers/perf/arm_pmu_platform.c9
-rw-r--r--drivers/perf/qcom_l2_pmu.c2
-rw-r--r--drivers/phy/broadcom/Kconfig2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c25
-rw-r--r--drivers/pinctrl/stm32/Kconfig9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c2
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c11
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/dell-wmi.c12
-rw-r--r--drivers/platform/x86/intel-vbtn.c4
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/ptp/ptp_clock.c42
-rw-r--r--drivers/ptp/ptp_private.h3
-rw-r--r--drivers/s390/cio/chp.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/sbus/char/display7seg.c4
-rw-r--r--drivers/sbus/char/flash.c4
-rw-r--r--drivers/sbus/char/uctrl.c4
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aacraid/aachba.c7
-rw-r--r--drivers/scsi/aic7xxx/Makefile12
-rw-r--r--drivers/scsi/aic7xxx/aicasm/Makefile53
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c68
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c45
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c64
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c6
-rw-r--r--drivers/scsi/qedf/qedf.h3
-rw-r--r--drivers/scsi/qedf/qedf_main.c20
-rw-r--r--drivers/scsi/qedi/Kconfig1
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c8
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c30
-rw-r--r--drivers/scsi/scsi_transport_fc.c6
-rw-r--r--drivers/scsi/sg.c28
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/soc/zte/Kconfig1
-rw-r--r--drivers/staging/comedi/comedi_fops.c3
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.h4
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h2
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c165
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.h6
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c2
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c8
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c16
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c7
-rw-r--r--drivers/target/target_core_tpg.c4
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/target_core_user.c13
-rw-r--r--drivers/thunderbolt/eeprom.c9
-rw-r--r--drivers/thunderbolt/icm.c9
-rw-r--r--drivers/thunderbolt/switch.c8
-rw-r--r--drivers/thunderbolt/tb.h4
-rw-r--r--drivers/thunderbolt/tb_msgs.h12
-rw-r--r--drivers/tty/serial/8250/8250_core.c23
-rw-r--r--drivers/tty/serial/8250/8250_exar.c4
-rw-r--r--drivers/tty/serial/amba-pl011.c37
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/gadget.c33
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c21
-rw-r--r--drivers/usb/host/pci-quirks.c37
-rw-r--r--drivers/usb/host/pci-quirks.h1
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/phy/phy-msm-usb.c17
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c5
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c9
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/storage/unusual_uas.h4
-rw-r--r--drivers/usb/storage/usb.c18
-rw-r--r--drivers/vfio/pci/vfio_pci.c9
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c13
-rw-r--r--drivers/vhost/vhost.c28
-rw-r--r--drivers/vhost/vhost.h3
-rw-r--r--drivers/video/fbdev/efifb.c8
-rw-r--r--drivers/video/fbdev/imxfb.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/virtio/virtio_balloon.c28
-rw-r--r--drivers/xen/events/events_base.c15
-rw-r--r--drivers/xen/xen-selfballoon.c4
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c3
-rw-r--r--drivers/xen/xenfs/super.c1
785 files changed, 12647 insertions, 8948 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index fc6c416f8724..d5999eb41c00 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) },
- { "HISI0A21", APD_ADDR(hip07_i2c_desc) },
- { "HISI0A22", APD_ADDR(hip08_i2c_desc) },
+ { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
+ { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
#endif
{ }
};
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e51a1e98e62f..f88caf5aab76 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
};
struct lpss_private_data {
+ struct acpi_device *adev;
void __iomem *mmio_base;
resource_size_t mmio_size;
unsigned int fixed_clk_rate;
@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
+ struct acpi_device *adev = pdata->adev;
+
+ /* Only call pwm_add_table for the first PWM controller */
+ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ return;
+
if (!acpi_dev_present("INT33FD", NULL, -1))
pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
+ struct acpi_device *adev = pdata->adev;
+
+ /* Only call pwm_add_table for the first PWM controller */
+ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ return;
+
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
}
+ pdata->adev = adev;
pdata->dev_desc = dev_desc;
if (dev_desc->setup)
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 8c4e0a18460a..bf22c29d2517 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void)
found = false;
resource_list_for_each_entry(rentry, &resource_list) {
- if (resource_contains(rentry->res, &res)) {
+ if (rentry->res->flags == res.flags &&
+ resource_overlaps(rentry->res, &res)) {
+ if (res.start < rentry->res->start)
+ rentry->res->start = res.start;
+ if (res.end > rentry->res->end)
+ rentry->res->end = res.end;
found = true;
break;
}
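
The hunk above relaxes the WDAT resource lookup from strict containment to
"same flags and overlapping", widening the existing entry to cover both
ranges. A standalone sketch of that merge step, for illustration only (the
struct and function names here are invented, not part of the patch):

	struct range { unsigned long start, end; };

	/* Grow *dst until it covers *src as well, mirroring how the patch
	 * extends rentry->res when it merely overlaps the WDAT resource. */
	static void merge_overlapping_range(struct range *dst,
					    const struct range *src)
	{
		if (src->start < dst->start)
			dst->start = src->start;
		if (src->end > dst->end)
			dst->end = src->end;
	}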
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..62068a5e814f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
@@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+
+void acpi_ec_flush_work(void)
+{
+ if (first_ec)
+ __acpi_ec_flush_event(first_ec);
+
+ flush_scheduled_work();
+}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev)
return 0;
}
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ /*
+ * The SCI handler doesn't run at this point, so the GPE can be
+ * masked at the low level without side effects.
+ */
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+ return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+ return 0;
+}
+
static int acpi_ec_resume(struct device *dev)
{
struct acpi_ec *ec =
@@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev)
#endif
static const struct dev_pm_ops acpi_ec_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
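
For context, the new callbacks hook into the standard dev_pm_ops noirq
phases, which run after device interrupts are disabled on suspend and
before they are re-enabled on resume; that is why the GPE can be masked
without racing the SCI handler. A minimal sketch of the pattern (the
"foo" names are hypothetical, not from this patch):

	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)       { return 0; }
	static int foo_resume(struct device *dev)        { return 0; }
	static int foo_suspend_noirq(struct device *dev) { return 0; }
	static int foo_resume_noirq(struct device *dev)  { return 0; }

	static const struct dev_pm_ops foo_pm = {
		/* noirq pair runs with device IRQ handlers quiesced. */
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq,
					      foo_resume_noirq)
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	};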
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..58dd7ab3c653 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -193,6 +193,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
* So go over all cpu entries in SRAT to get apicid to node mapping.
*/
- /* SRAT: Static Resource Affinity Table */
+ /* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
struct acpi_subtable_proc srat_proc[3];
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..fa8243c5c062 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -777,11 +777,11 @@ static void acpi_freeze_sync(void)
/*
* Process all pending events in case there are any wakeup ones.
*
- * The EC driver uses the system workqueue, so that one needs to be
- * flushed too.
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
*/
+ acpi_ec_flush_work();
acpi_os_wait_events_complete();
- flush_scheduled_work();
s2idle_wakeup = false;
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..98aa8c808a33 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
#include <linux/serial_core.h>
/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
 * Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
goto done;
}
- if (qdf2400_erratum_44_present(&table->header))
- uart = "qdf2400_e44";
+ /*
+ * If the E44 erratum is required, then we need to tell the pl011
+ * driver to implement the work-around.
+ *
+ * The global variable is used by the probe function when it
+ * creates the UARTs, whether or not they're used as a console.
+ *
+ * If the user specifies "traditional" earlycon, the qdf2400_e44
+ * console name matches the EARLYCON_DECLARE() statement, and
+ * SPCR is not used. Parameter "earlycon" is false.
+ *
+ * If the user specifies "SPCR" earlycon, then we need to update
+ * the console name so that it also says "qdf2400_e44". Parameter
+ * "earlycon" is true.
+ *
+ * For consistency, if we change the console name, then we do it
+ * for everyone, not just earlycon.
+ */
+ if (qdf2400_erratum_44_present(&table->header)) {
+ qdf2400_e44_present = true;
+ if (earlycon)
+ uart = "qdf2400_e44";
+ }
+
if (xgene_8250_erratum_present(table))
iotype = "mmio32";
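
The long comment above explains that qdf2400_e44_present is consulted by
the pl011 probe path whether or not the UART is used as a console. A hedged
sketch of such a consumer (every "my_" identifier below is invented for
illustration; the real driver wires this up differently):

	#include <stdbool.h>

	extern bool qdf2400_e44_present;	/* set by parse_spcr() */

	struct my_uart_port {
		/* Returns true while the UART cannot accept another char. */
		bool (*is_busy)(void);
	};

	/* Erratum 44 work-around: treat the port as busy while TXFE == 0. */
	static bool my_busy_txfe(void) { return false; }
	/* Normal path: treat the port as busy while BUSY == 1. */
	static bool my_busy_flag(void) { return false; }

	static void my_uart_apply_quirks(struct my_uart_port *p)
	{
		p->is_busy = qdf2400_e44_present ? my_busy_txfe
						 : my_busy_flag;
	}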
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 948fc86980a1..363fc5330c21 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -215,7 +215,7 @@ config SATA_FSL
config SATA_GEMINI
tristate "Gemini SATA bridge support"
- depends on PATA_FTIDE010
+ depends on ARCH_GEMINI || COMPILE_TEST
default ARCH_GEMINI
help
This enables support for the FTIDE010 to SATA bridge
@@ -613,7 +613,7 @@ config PATA_FTIDE010
tristate "Faraday Technology FTIDE010 PATA support"
depends on OF
depends on ARM
- default ARCH_GEMINI
+ depends on SATA_GEMINI
help
This option enables support for the Faraday FTIDE010
PATA controller found in the Cortina Gemini SoCs.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8453f9a4682f..fa7dd4394c02 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+ !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
@@ -2102,8 +2102,8 @@ retry:
buf, sectors * ATA_SECT_SIZE, 0);
if (err_mask && dma) {
- dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
- ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+ dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
goto retry;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b70bcf6d2914..3dbd05532c09 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
/**
* ata_eh_done - EH action complete
-* @ap: target ATA port
+ * @link: ATA link for which EH actions are complete
* @dev: target ATA dev for per-dev action (can be NULL)
* @action: action just completed
*
@@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+ * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
* @cmd: scsi command for which the sense code should be set
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
@@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
- int rc = 0;
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
@@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
- rc = ap->ops->port_resume(ap);
+ ap->ops->port_resume(ap);
/* tell ACPI that we're resuming */
ata_acpi_on_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d462c5a3a7ef..44ba292f2cd7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
{
if (!sata_pmp_attached(ap)) {
- if (likely(devno < ata_link_max_devices(&ap->link)))
+ if (likely(devno >= 0 &&
+ devno < ata_link_max_devices(&ap->link)))
return &ap->link.device[devno];
} else {
- if (likely(devno < ap->nr_pmp_links))
+ if (likely(devno >= 0 &&
+ devno < ap->nr_pmp_links))
return &ap->pmp_link[devno].device[0];
}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index ee9844758736..537d11869069 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = {
.compatible = "renesas,sata-r8a7795",
.data = (void *)RCAR_GEN2_SATA
},
+ {
+ .compatible = "renesas,rcar-gen2-sata",
+ .data = (void *)RCAR_GEN2_SATA
+ },
+ {
+ .compatible = "renesas,rcar-gen3-sata",
+ .data = (void *)RCAR_GEN2_SATA
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
{
if (dev && dev->dma_mem)
return dev->dma_mem;
- return dma_coherent_default_memory;
+ return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
+ void *ret;
- if (!mem)
- return 0;
-
- *ret = NULL;
spin_lock_irqsave(&mem->spinlock, flags);
if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
goto err;
/*
- * Memory was found in the per-device area.
+ * Memory was found in the coherent area.
*/
- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
if (dma_memory_map)
- memset(*ret, 0, size);
+ memset(ret, 0, size);
else
- memset_io(*ret, 0, size);
+ memset_io(ret, 0, size);
- return 1;
+ return ret;
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
+ return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ if (!mem)
+ return 0;
+
+ *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ if (*ret)
+ return 1;
+
/*
* In the case where the allocation can not be satisfied from the
* per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
*/
return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ if (!dma_coherent_default_memory)
+ return NULL;
+
+ return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+ dma_handle);
+}
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+ int order, void *vaddr)
+{
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
}
return 0;
}
-EXPORT_SYMBOL(dma_release_from_coherent);
/**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
* @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret: result from remap_pfn_range()
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
*
* This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
*/
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_release_from_coherent(dma_coherent_default_memory, order,
+ vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
}
return 0;
}
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+ * @size: size of the memory buffer allocated
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+ size_t size, int *ret)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+ vaddr, size, ret);
+}
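
[Editor's note] Taken together, the helpers above split the old single entry point into a per-device path and a global (device-tree declared) path. A minimal sketch, not part of this patch and with a hypothetical arch hook name, of how an allocation path might chain them:

static void *my_arch_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	/* 1. Pool declared for this device via dma_declare_coherent_memory().
	 *    A nonzero return means the request was handled (vaddr may be
	 *    NULL for an exhausted exclusive pool; do not fall back then).
	 */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	/* 2. Global pool declared via a shared-dma-pool reserved-memory node. */
	vaddr = dma_alloc_from_global_coherent(size, dma_handle);
	if (vaddr)
		return vaddr;

	/* 3. Generic allocator; mapping *dma_handle is arch-specific and
	 *    omitted here.
	 */
	return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}
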
/*
* Support for reserved memory regions defined in device tree
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,7 +30,6 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
-#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
* state of the firmware loading.
*/
struct fw_state {
- struct swait_queue_head wq;
+ struct completion completion;
enum fw_status status;
};
static void fw_state_init(struct fw_state *fw_st)
{
- init_swait_queue_head(&fw_st->wq);
+ init_completion(&fw_st->completion);
fw_st->status = FW_STATUS_UNKNOWN;
}
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
long ret;
- ret = swait_event_interruptible_timeout(fw_st->wq,
- __fw_state_is_done(READ_ONCE(fw_st->status)),
- timeout);
+ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
return -ENOENT;
if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
WRITE_ONCE(fw_st->status, status);
if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
- swake_up(&fw_st->wq);
+ complete_all(&fw_st->completion);
}
#define fw_state_start(fw_st) \
__fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st) \
__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st) \
__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st) false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
{
return fw_st->status == status;
}
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
#define fw_state_aborted(fw_st) \
__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_is_done(fw_st) \
__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st) \
__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait_timeout(fw_st, timeout) \
__fw_state_wait_common(fw_st, timeout)
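
[Editor's note] The swait-to-completion conversion above is what makes batched requests work: complete_all() leaves the completion in a done state, so every waiter, including ones that arrive later, returns immediately after a single wake. A minimal self-contained sketch of the same pattern (names are illustrative, not from the driver):

#include <linux/completion.h>
#include <linux/errno.h>

struct my_load_state {
	struct completion done;
	int status;
};

static int my_load_wait(struct my_load_state *st, long timeout)
{
	long ret = wait_for_completion_killable_timeout(&st->done, timeout);

	if (ret > 0)
		return st->status;	/* finished: complete_all() ran */
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out */
	return ret;			/* -ERESTARTSYS: fatal signal */
}

static void my_load_finish(struct my_load_state *st, int status)
{
	st->status = status;
	complete_all(&st->done);	/* wakes all current and future waiters */
}
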
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 1; /* need to load */
}
+/*
+ * Batched requests need only one wake; we do this step last because of
+ * the fallback mechanism. The buf is protected with kref_get() and won't
+ * be released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+ struct firmware_buf *buf;
+
+ /* Loaded directly? */
+ if (!fw || !fw->priv)
+ return;
+
+ buf = fw->priv;
+ if (!fw_state_is_aborted(&buf->fw_st))
+ fw_state_aborted(&buf->fw_st);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
out:
if (ret < 0) {
+ fw_abort_batch_reqs(fw);
release_firmware(fw);
fw = NULL;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 87a0a29f6e7e..5bdf923294a5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -908,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
continue;
}
sk_set_memalloc(sock->sk);
- sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
@@ -922,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
+ clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
*/
@@ -978,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
int i, ret;
for (i = 0; i < config->num_connections; i++) {
+ struct nbd_sock *nsock = config->socks[i];
+
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
+ mutex_unlock(&nsock->tx_lock);
}
}
@@ -991,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
- &config->runtime_flags))
- send_disconnects(nbd);
+ set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ send_disconnects(nbd);
return 0;
}
@@ -1074,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
- config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ config->socks[i]->sock->sk->sk_sndtimeo =
+ nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b16ead1da58..ad9749463d4f 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -875,6 +875,56 @@ static void print_version(void)
printk(KERN_INFO "%s", version);
}
+struct vdc_check_port_data {
+ int dev_no;
+ char *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct vdc_check_port_data *port_data;
+
+ port_data = (struct vdc_check_port_data *)arg;
+
+ if ((vdev->dev_no == port_data->dev_no) &&
+ (!(strcmp((char *)&vdev->type, port_data->type))) &&
+ dev_get_drvdata(dev)) {
+ /* This device has already been configured
+ * by vdc_port_probe()
+ */
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+ struct vdc_check_port_data port_data;
+ struct device *dev;
+
+ port_data.dev_no = vdev->dev_no;
+ port_data.type = (char *)&vdev->type;
+
+ dev = device_find_child(vdev->dev.parent, &port_data,
+ vdc_device_probed);
+
+ if (dev)
+ return true;
+
+ return false;
+}
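
[Editor's note] One subtlety worth flagging: device_find_child() returns the matched device with a reference taken via get_device(). Since the caller above only needs a boolean answer, a variant of the same check that drops the reference immediately would look like this (same helpers as above):

	dev = device_find_child(vdev->dev.parent, &port_data,
				vdc_device_probed);
	if (dev) {
		put_device(dev);	/* drop the ref device_find_child() took */
		return true;
	}
	return false;
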
+
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto err_out_release_mdesc;
}
+ /* Check if this device is part of an mpgroup */
+ if (vdc_port_mpgroup_check(vdev)) {
+ printk(KERN_WARNING
+ "VIO: Ignoring extra vdisk port %s",
+ dev_name(&vdev->dev));
+ goto err_out_release_mdesc;
+ }
+
port = kzalloc(sizeof(*port), GFP_KERNEL);
err = -ENOMEM;
if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_tx_ring;
+ /* Note that the device driver_data is used to determine
+ * whether the port has been probed.
+ */
dev_set_drvdata(&vdev->dev, port);
mdesc_release(hp);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4e02aa5fdac0..1498b899a593 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -541,12 +541,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
- for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
- if (sysfs_streq(buf, virtblk_cache_types[i]))
- break;
-
+ i = sysfs_match_string(virtblk_cache_types, buf);
if (i < 0)
- return -EINVAL;
+ return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c852ed3c01d5..98e34e4c62b8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -111,7 +111,7 @@ struct blk_shadow {
};
struct blkif_req {
- int error;
+ blk_status_t error;
};
static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* existing persistent grants, or if we have to get new grants,
* as there are not sufficiently many free.
*/
+ bool new_persistent_gnts = false;
struct scatterlist *sg;
int num_sg, max_grefs, num_grant;
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
*/
max_grefs += INDIRECT_GREFS(max_grefs);
- /*
- * We have to reserve 'max_grefs' grants because persistent
- * grants are shared by all rings.
- */
- if (max_grefs > 0)
- if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+ /* Check if we have enough persistent grants to allocate a request */
+ if (rinfo->persistent_gnts_c < max_grefs) {
+ new_persistent_gnts = true;
+
+ if (gnttab_alloc_grant_references(
+ max_grefs - rinfo->persistent_gnts_c,
+ &setup.gref_head) < 0) {
gnttab_request_free_callback(
&rinfo->callback,
blkif_restart_queue_callback,
rinfo,
- max_grefs);
+ max_grefs - rinfo->persistent_gnts_c);
return 1;
}
+ }
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (unlikely(require_extra_req))
rinfo->shadow[extra_id].req = *extra_ring_req;
- if (max_grefs > 0)
+ if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
return 0;
@@ -906,8 +909,8 @@ out_err:
return BLK_STS_IOERR;
out_busy:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_RESOURCE;
}
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451..3b1b6340ba13 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
- char compressor[CRYPTO_MAX_ALG_NAME];
+ char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
return -EBUSY;
}
- strlcpy(zram->compressor, compressor, sizeof(compressor));
+ strcpy(zram->compressor, compressor);
up_write(&zram->init_lock);
return len;
}
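
[Editor's note] The two zram hunks pair up: sizing the stack buffer from the destination field means the bounded strlcpy() and the final strcpy() can never disagree about length. The same idiom as a standalone sketch, with an illustrative struct:

#include <linux/kernel.h>
#include <linux/string.h>

struct widget {
	char name[32];
};

static void widget_set_name(struct widget *w, const char *buf)
{
	/* Stage into a buffer sized from the destination field itself. */
	char tmp[ARRAY_SIZE(w->name)];

	strlcpy(tmp, buf, sizeof(tmp));	/* bounded, NUL-terminated */
	strcpy(w->name, tmp);		/* safe: tmp fits by construction */
}
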
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 1e6e0269edcc..f76be6bd6eb3 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -256,10 +256,23 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
uniphier_system_bus_set_reg(priv);
+ platform_set_drvdata(pdev, priv);
+
/* Now, the bus is configured. Populate platform_devices below it */
return of_platform_default_populate(dev->of_node, NULL, dev);
}
+static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
+{
+ uniphier_system_bus_set_reg(dev_get_drvdata(dev));
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
+};
+
static const struct of_device_id uniphier_system_bus_match[] = {
{ .compatible = "socionext,uniphier-system-bus" },
{ /* sentinel */ }
@@ -271,6 +284,7 @@ static struct platform_driver uniphier_system_bus_driver = {
.driver = {
.name = "uniphier-system-bus",
.of_match_table = uniphier_system_bus_match,
+ .pm = &uniphier_system_bus_pm_ops,
},
};
module_platform_driver(uniphier_system_bus_driver);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index afa3ce7d3e72..8ad92707e45f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
print_once = true;
#endif
- pr_notice("random: %s called from %pF with crng_init=%d\n",
+ pr_notice("random: %s called from %pS with crng_init=%d\n",
func_name, caller, crng_init);
}
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index c391a49aaaff..b4cf2f699a21 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -237,6 +237,18 @@ static int gemini_reset(struct reset_controller_dev *rcdev,
BIT(GEMINI_RESET_CPU1) | BIT(id));
}
+static int gemini_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
+static int gemini_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
static int gemini_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -253,6 +265,8 @@ static int gemini_reset_status(struct reset_controller_dev *rcdev,
static const struct reset_control_ops gemini_reset_ops = {
.reset = gemini_reset,
+ .assert = gemini_reset_assert,
+ .deassert = gemini_reset_deassert,
.status = gemini_reset_status,
};
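
[Editor's note] With .assert and .deassert stubbed out, consumers that drive the full reset_control API no longer error out on this controller, while reset_control_reset() still performs the real self-deasserting pulse. A hedged consumer-side sketch:

	struct reset_control *rst;

	rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* no-op here, returns 0 */
	reset_control_deassert(rst);	/* likewise a no-op */
	reset_control_reset(rst);	/* the actual pulsed reset */
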
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 43b0f2f08df2..9cdf9d5050ac 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/bsearch.h>
#define SCI_CLK_SSC_ENABLE BIT(0)
#define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1)
@@ -44,6 +45,7 @@ struct sci_clk_data {
* @dev: Device pointer for the clock provider
* @clk_data: Clock data
* @clocks: Clocks array for this device
+ * @num_clocks: Total number of clocks for this provider
*/
struct sci_clk_provider {
const struct ti_sci_handle *sci;
@@ -51,6 +53,7 @@ struct sci_clk_provider {
struct device *dev;
const struct sci_clk_data *clk_data;
struct clk_hw **clocks;
+ int num_clocks;
};
/**
@@ -58,7 +61,6 @@ struct sci_clk_provider {
* @hw: Hardware clock cookie for common clock framework
* @dev_id: Device index
* @clk_id: Clock index
- * @node: Clocks list link
* @provider: Master clock provider
* @flags: Flags for the clock
*/
@@ -66,7 +68,6 @@ struct sci_clk {
struct clk_hw hw;
u16 dev_id;
u8 clk_id;
- struct list_head node;
struct sci_clk_provider *provider;
u8 flags;
};
@@ -367,6 +368,19 @@ err:
return &sci_clk->hw;
}
+static int _cmp_sci_clk(const void *a, const void *b)
+{
+ const struct sci_clk *ca = a;
+ const struct sci_clk *cb = *(struct sci_clk **)b;
+
+ if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
+ return 0;
+ if (ca->dev_id > cb->dev_id ||
+ (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
+ return 1;
+ return -1;
+}
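
[Editor's note] Note the asymmetry in the comparator: the key is passed directly as a struct sci_clk, while each array slot holds a pointer, so bsearch() hands the comparator a pointer to that pointer (hence the *(struct sci_clk **)b dereference; sizeof(clk) in the caller below is correspondingly the slot size, a pointer). The same shape as a runnable userspace sketch with made-up data:

#include <stdio.h>
#include <stdlib.h>

struct item { int dev_id, clk_id; };

/* a: the key itself; b: pointer to an array slot that holds an item *. */
static int cmp_item(const void *a, const void *b)
{
	const struct item *ka = a;
	const struct item *kb = *(struct item * const *)b;

	if (ka->dev_id != kb->dev_id)
		return ka->dev_id < kb->dev_id ? -1 : 1;
	if (ka->clk_id != kb->clk_id)
		return ka->clk_id < kb->clk_id ? -1 : 1;
	return 0;
}

int main(void)
{
	struct item a = {1, 0}, b = {1, 2}, c = {2, 0};
	struct item *slots[] = { &a, &b, &c };	/* sorted by (dev_id, clk_id) */
	struct item key = {1, 2};
	struct item **hit;

	hit = bsearch(&key, slots, 3, sizeof(slots[0]), cmp_item);
	printf("%s\n", hit ? "found" : "not found");
	return 0;
}
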
+
/**
* sci_clk_get - Xlate function for getting clock handles
* @clkspec: device tree clock specifier
@@ -380,29 +394,22 @@ err:
static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct sci_clk_provider *provider = data;
- u16 dev_id;
- u8 clk_id;
- const struct sci_clk_data *clks = provider->clk_data;
- struct clk_hw **clocks = provider->clocks;
+ struct sci_clk **clk;
+ struct sci_clk key;
if (clkspec->args_count != 2)
return ERR_PTR(-EINVAL);
- dev_id = clkspec->args[0];
- clk_id = clkspec->args[1];
+ key.dev_id = clkspec->args[0];
+ key.clk_id = clkspec->args[1];
- while (clks->num_clks) {
- if (clks->dev == dev_id) {
- if (clk_id >= clks->num_clks)
- return ERR_PTR(-EINVAL);
-
- return clocks[clk_id];
- }
+ clk = bsearch(&key, provider->clocks, provider->num_clocks,
+ sizeof(clk), _cmp_sci_clk);
- clks++;
- }
+ if (!clk)
+ return ERR_PTR(-ENODEV);
- return ERR_PTR(-ENODEV);
+ return &(*clk)->hw;
}
static int ti_sci_init_clocks(struct sci_clk_provider *p)
@@ -410,18 +417,29 @@ static int ti_sci_init_clocks(struct sci_clk_provider *p)
const struct sci_clk_data *data = p->clk_data;
struct clk_hw *hw;
int i;
+ int num_clks = 0;
while (data->num_clks) {
- p->clocks = devm_kcalloc(p->dev, data->num_clks,
- sizeof(struct sci_clk),
- GFP_KERNEL);
- if (!p->clocks)
- return -ENOMEM;
+ num_clks += data->num_clks;
+ data++;
+ }
+ p->num_clocks = num_clks;
+
+ p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk),
+ GFP_KERNEL);
+ if (!p->clocks)
+ return -ENOMEM;
+
+ num_clks = 0;
+
+ data = p->clk_data;
+
+ while (data->num_clks) {
for (i = 0; i < data->num_clks; i++) {
hw = _sci_clk_build(p, data->dev, i);
if (!IS_ERR(hw)) {
- p->clocks[i] = hw;
+ p->clocks[num_clks++] = hw;
continue;
}
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 39eab69fe51a..44a5a535ca63 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -161,6 +161,13 @@ static int mpll_set_rate(struct clk_hw *hw,
reg = PARM_SET(p->width, p->shift, reg, 1);
writel(reg, mpll->base + p->reg_off);
+ p = &mpll->ssen;
+ if (p->width != 0) {
+ reg = readl(mpll->base + p->reg_off);
+ reg = PARM_SET(p->width, p->shift, reg, 1);
+ writel(reg, mpll->base + p->reg_off);
+ }
+
p = &mpll->n2;
reg = readl(mpll->base + p->reg_off);
reg = PARM_SET(p->width, p->shift, reg, n2);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index d6feafe8bd6c..1629da9b4141 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -118,6 +118,7 @@ struct meson_clk_mpll {
struct parm sdm_en;
struct parm n2;
struct parm en;
+ struct parm ssen;
spinlock_t *lock;
};
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index a897ea45327c..a7ea5f3da89d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -528,6 +528,11 @@ static struct meson_clk_mpll gxbb_mpll0 = {
.shift = 14,
.width = 1,
},
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
.lock = &clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index bb3f1de876b1..6ec512ad2598 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -267,6 +267,11 @@ static struct meson_clk_mpll meson8b_mpll0 = {
.shift = 14,
.width = 1,
},
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
.lock = &clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 0748a0b333c5..9a6476aa7d81 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1283,16 +1283,16 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini
static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
PLL_36XX_RATE(600000000U, 100, 2, 1, 0),
PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(393216000U, 197, 3, 2, 25690),
- PLL_36XX_RATE(361267200U, 301, 5, 2, 3671),
+ PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(361267218U, 301, 5, 2, 3671),
PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
- PLL_36XX_RATE(196608000U, 197, 3, 3, -25690),
- PLL_36XX_RATE(180633600U, 301, 5, 3, 3671),
- PLL_36XX_RATE(131072000U, 131, 3, 3, 4719),
+ PLL_36XX_RATE(196608001U, 197, 3, 3, -25690),
+ PLL_36XX_RATE(180633609U, 301, 5, 3, 3671),
+ PLL_36XX_RATE(131072006U, 131, 3, 3, 4719),
PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
- PLL_36XX_RATE(65536000U, 131, 3, 4, 4719),
- PLL_36XX_RATE(49152000U, 197, 3, 5, 25690),
- PLL_36XX_RATE(32768000U, 131, 3, 5, 4719),
+ PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719),
+ PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690),
+ PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719),
};
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5372bf8be5e6..31d7ffda9aab 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
.hw.init = CLK_HW_INIT_PARENTS("cpu",
cpu_parents,
&ccu_mux_ops,
- CLK_IS_CRITICAL),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
}
};
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index f99abc1106f0..08ef69945ffb 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -186,6 +186,13 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
+ /*
+ * If the clock was already enabled by the firmware, mark it as critical
+ * to avoid it being gated by the clock framework if no driver owns it.
+ */
+ if (plt_clk_is_enabled(&pclk->hw))
+ init.flags |= CLK_IS_CRITICAL;
+
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6cd503525638..0566455f233e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1922,13 +1922,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
return 0;
}
-static unsigned int intel_pstate_get(unsigned int cpu_num)
-{
- struct cpudata *cpu = all_cpu_data[cpu_num];
-
- return cpu ? get_avg_frequency(cpu) : 0;
-}
-
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
@@ -2169,7 +2162,6 @@ static struct cpufreq_driver intel_pstate = {
.setpolicy = intel_pstate_set_policy,
.suspend = intel_pstate_hwp_save_state,
.resume = intel_pstate_resume,
- .get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
return -1;
}
+extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
const char *names[CPUIDLE_STATE_MAX];
u32 has_stop_states = 0;
int i, rc;
+ u32 supported_flags = pnv_get_supported_cpuidle_states();
+
/* Currently we have snooze statically defined */
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
bool stops_timebase = false;
+
+ /*
+ * Skip platform idle states whose flags are not present
+ * in the supported_cpuidle_states mask.
+ */
+ if ((flags[i] & supported_flags) != flags[i])
+ continue;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
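
[Editor's note] The skip condition is the usual bit-subset test: (x & mask) == x holds exactly when every bit set in x is also set in mask. A two-line sketch:

static inline bool state_flags_supported(u32 flags, u32 supported)
{
	/* e.g. flags=0x6, supported=0x7 -> 0x6 == flags, keep;
	 *      flags=0xA, supported=0x7 -> 0x2 != flags, skip
	 */
	return (flags & supported) == flags;
}
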
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 193204dfbf3a..4b75084fabad 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig"
config CRYPTO_DEV_BCM_SPU
tristate "Broadcom symmetric crypto/hash acceleration support"
depends on ARCH_BCM_IPROC
- depends on BCM_PDC_MBOX
+ depends on MAILBOX
default m
select CRYPTO_DES
select CRYPTO_MD5
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index ef04c9748317..bf7ac621c591 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
break;
case HASH_ALG_SHA3_512:
*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+ break;
case HASH_ALG_LAST:
default:
err = -EINVAL;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index ae44a464cd2d..9ccefb9b7232 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -18,8 +18,9 @@
#define SE_GROUP 0
#define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
/* SE microcode */
-#define SE_FW "cnn55xx_se.fw"
+#define SE_FW FW_DIR "cnn55xx_se.fw"
static const char nitrox_driver_name[] = "CNN55XX";
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index e7f87ac12685..1fabd4aee81b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct safexcel_crypto_priv *priv;
- u64 dma_mask;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
- dma_mask = DMA_BIT_MASK(64);
- ret = dma_set_mask_and_coherent(dev, dma_mask);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
goto err_clk;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 8527a5899a2f..3f819399cd95 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
- memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
- for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
}
}
+ memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+ memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
return 0;
}
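
[Editor's note] The reorder above is the whole fix: the loop must compare the incoming istate/ostate against the digests still cached in ctx->ipad/opad; copying first would make the comparison trivially equal, and needs_inv would never be set. The general compare-then-update shape, as a sketch:

#include <linux/string.h>
#include <linux/types.h>

static bool update_cached_digest(u32 *cache, const u32 *fresh, size_t words)
{
	bool changed = false;
	size_t i;

	/* Compare against the old cached value *before* overwriting it. */
	for (i = 0; i < words; i++) {
		if (cache[i] != fresh[i]) {
			changed = true;
			break;
		}
	}

	memcpy(cache, fresh, words * sizeof(*cache));
	return changed;	/* caller sets needs_inv when true */
}
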
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
}
EXPORT_SYMBOL_GPL(dax_write_cache);
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 393817e849ed..dec3a815455d 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -195,8 +195,7 @@ done:
if (old)
kfree_rcu(old, rcu);
- if (old_fence)
- dma_fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -258,12 +257,71 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
- if (old_fence)
- dma_fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
/**
+ * reservation_object_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. Both src->lock and dst->lock must
+ * be held.
+ */
+int reservation_object_copy_fences(struct reservation_object *dst,
+ struct reservation_object *src)
+{
+ struct reservation_object_list *src_list, *dst_list;
+ struct dma_fence *old, *new;
+ size_t size;
+ unsigned i;
+
+ src_list = reservation_object_get_list(src);
+
+ if (src_list) {
+ size = offsetof(typeof(*src_list),
+ shared[src_list->shared_count]);
+ dst_list = kmalloc(size, GFP_KERNEL);
+ if (!dst_list)
+ return -ENOMEM;
+
+ dst_list->shared_count = src_list->shared_count;
+ dst_list->shared_max = src_list->shared_count;
+ for (i = 0; i < src_list->shared_count; ++i)
+ dst_list->shared[i] =
+ dma_fence_get(src_list->shared[i]);
+ } else {
+ dst_list = NULL;
+ }
+
+ kfree(dst->staged);
+ dst->staged = NULL;
+
+ src_list = reservation_object_get_list(dst);
+
+ old = reservation_object_get_excl(dst);
+ new = reservation_object_get_excl(src);
+
+ dma_fence_get(new);
+
+ preempt_disable();
+ write_seqcount_begin(&dst->seq);
+ /* write_seqcount_begin provides the necessary memory barrier */
+ RCU_INIT_POINTER(dst->fence_excl, new);
+ RCU_INIT_POINTER(dst->fence, dst_list);
+ write_seqcount_end(&dst->seq);
+ preempt_enable();
+
+ if (src_list)
+ kfree_rcu(src_list, rcu);
+ dma_fence_put(old);
+
+ return 0;
+}
+EXPORT_SYMBOL(reservation_object_copy_fences);
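
[Editor's note] The publication step above follows the standard seqcount write-side pattern: bump the sequence, swap the RCU pointers, bump again, and let lockless readers detect the window and retry. A stripped-down sketch of both sides (generic names):

#include <linux/seqlock.h>
#include <linux/rcupdate.h>

struct thing;
struct obj {
	seqcount_t seq;
	struct thing __rcu *thing;
};

/* Writer, with the object lock held: */
static void publish(struct obj *o, struct thing *new_thing)
{
	preempt_disable();
	write_seqcount_begin(&o->seq);
	RCU_INIT_POINTER(o->thing, new_thing);	/* readers see old or new */
	write_seqcount_end(&o->seq);
	preempt_enable();
}

/* Lockless reader: */
static struct thing *snapshot(struct obj *o)
{
	struct thing *t;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&o->seq);
		t = rcu_dereference(o->thing);
	} while (read_seqcount_retry(&o->seq, seq));

	return t;	/* valid while rcu_read_lock() is held */
}
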
+
+/**
* reservation_object_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
@@ -373,12 +431,25 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
long ret = timeout ? timeout : 1;
retry:
- fence = NULL;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
- if (wait_all) {
+ fence = rcu_dereference(obj->fence_excl);
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (!dma_fence_get_rcu(fence))
+ goto unlock_retry;
+
+ if (dma_fence_is_signaled(fence)) {
+ dma_fence_put(fence);
+ fence = NULL;
+ }
+
+ } else {
+ fence = NULL;
+ }
+
+ if (!fence && wait_all) {
struct reservation_object_list *fobj =
rcu_dereference(obj->fence);
@@ -405,22 +476,6 @@ retry:
}
}
- if (!shared_count) {
- struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (fence_excl &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence_excl->flags)) {
- if (!dma_fence_get_rcu(fence_excl))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(fence_excl))
- dma_fence_put(fence_excl);
- else
- fence = fence_excl;
- }
- }
-
rcu_read_unlock();
if (fence) {
if (read_seqcount_retry(&obj->seq, seq)) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f235eae04c16..461d6fc3688b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -504,6 +504,7 @@ config GPIO_XGENE_SB
depends on ARCH_XGENE && OF_GPIO
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
This driver supports the GPIO block within the APM X-Gene
Standby Domain. Say yes here to enable the GPIO functionality.
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index fb8d304cfa17..0ecd2369c2ca 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -132,7 +132,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- ret = device_property_read_u32(&pdev->dev, "linux,first-pin",
+ ret = device_property_read_u32(&pdev->dev, "exar,first-pin",
&first_pin);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 6313c50bb91b..a121c8f10610 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -26,6 +26,27 @@ struct lp87565_gpio {
struct regmap *map;
};
+static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
+}
+
static int lp87565_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
@@ -54,30 +75,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ lp87565_gpio_set(chip, offset, value);
+
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
- BIT(offset), !value ? BIT(offset) : 0);
-}
-
-static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct lp87565_gpio *gpio = gpiochip_get_data(chip);
- int ret, val;
-
- ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
- if (ret < 0)
- return ret;
-
- return !!(val & BIT(offset));
-}
-
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct lp87565_gpio *gpio = gpiochip_get_data(chip);
-
- regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ BIT(offset), BIT(offset));
}
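
[Editor's note] Moving lp87565_gpio_set() ahead of the direction flip is a glitch-avoidance ordering: the output latch is programmed while the pin is still an input, so when the direction bit flips, the pin immediately drives the requested level rather than a stale one. The shape of the idiom, with hypothetical register helpers:

static int pmic_gpio_direction_output(struct gpio_chip *chip,
				      unsigned int offset, int value)
{
	/* 1. Latch the level while the pin is still an input ... */
	pmic_gpio_set_level(chip, offset, value);	/* hypothetical */

	/* 2. ... then switch to output, so no stale level is driven. */
	return pmic_gpio_set_output(chip, offset);	/* hypothetical */
}
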
static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 3abea3f0b307..92692251ade1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -424,6 +424,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
return PTR_ERR(port->base);
port->irq_high = platform_get_irq(pdev, 1);
+ if (port->irq_high < 0)
+ port->irq_high = 0;
+
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
return port->irq;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 88529d3c06c9..506c6a67c5fc 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -360,7 +360,7 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
{
int port;
int pin;
- int unmasked = 0;
+ bool unmasked = false;
int gpio;
u32 lvl;
unsigned long sta;
@@ -384,8 +384,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
* before executing the handler so that we don't
* miss edges
*/
- if (lvl & (0x100 << pin)) {
- unmasked = 1;
+ if (!unmasked && lvl & (0x100 << pin)) {
+ unmasked = true;
chained_irq_exit(chip, desc);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9568708a550b..cd003b74512f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
struct lineevent_state *le = p;
struct gpioevent_data ge;
- int ret;
+ int ret, level;
ge.timestamp = ktime_get_real_ns();
+ level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
- int level = gpiod_get_value_cansleep(le->desc);
-
if (level)
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
else
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
} else {
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 24a066e1841c..a8acc197dec3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,7 +33,7 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o
+ drm_scdc_helper.o drm_gem_framebuffer_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 51d1364cf185..12e71bbfd222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -96,6 +96,7 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
@@ -373,78 +374,10 @@ struct amdgpu_clock {
};
/*
- * BO.
+ * GEM.
*/
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_va_mapping {
- struct list_head list;
- struct rb_node rb;
- uint64_t start;
- uint64_t last;
- uint64_t __subtree_last;
- uint64_t offset;
- uint64_t flags;
-};
-
-/* bo virtual addresses in a specific vm */
-struct amdgpu_bo_va {
- /* protected by bo being reserved */
- struct list_head bo_list;
- struct dma_fence *last_pt_update;
- unsigned ref_count;
-
- /* protected by vm mutex and spinlock */
- struct list_head vm_status;
-
- /* mappings for this bo_va */
- struct list_head invalids;
- struct list_head valids;
-
- /* constant after initialization */
- struct amdgpu_vm *vm;
- struct amdgpu_bo *bo;
-};
#define AMDGPU_GEM_DOMAIN_MAX 0x3
-
-struct amdgpu_bo {
- /* Protected by tbo.reserved */
- u32 prefered_domains;
- u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
- u64 flags;
- unsigned pin_count;
- void *kptr;
- u64 tiling_flags;
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
- unsigned prime_shared_count;
- /* list of all virtual address to which this bo
- * is associated to
- */
- struct list_head va;
- /* Constant after initialization */
- struct drm_gem_object gem_base;
- struct amdgpu_bo *parent;
- struct amdgpu_bo *shadow;
-
- struct ttm_bo_kmap_obj dma_buf_vmap;
- struct amdgpu_mn *mn;
- struct list_head mn_list;
- struct list_head shadow_list;
-};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
void amdgpu_gem_object_free(struct drm_gem_object *obj);
@@ -678,15 +611,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
/* overlap the doorbell assignment with VCN as they are mutually exclusive
* VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
*/
- AMDGPU_DOORBELL64_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_RING6_7 = 0xFB,
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF,
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
AMDGPU_DOORBELL64_INVALID = 0xFFFF
@@ -816,6 +749,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct amdgpu_bo_va *prt_va;
+ struct amdgpu_bo_va *csa_va;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
@@ -825,6 +759,14 @@ struct amdgpu_fpriv {
/*
* residency list
*/
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
struct amdgpu_bo_list {
struct mutex lock;
@@ -1191,10 +1133,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
-int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);
-void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb);
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
@@ -1488,7 +1426,7 @@ struct amdgpu_device {
bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
- struct amdgpu_bo *stollen_vga_memory;
+ struct amdgpu_bo *stolen_vga_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -1546,9 +1484,6 @@ struct amdgpu_device {
struct amdgpu_mman mman;
struct amdgpu_vram_scratch vram_scratch;
struct amdgpu_wb wb;
- atomic64_t vram_usage;
- atomic64_t vram_vis_usage;
- atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
atomic64_t num_vram_cpu_page_faults;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 06879d1dcabd..a52795d9b458 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -285,19 +285,20 @@ static int acp_hw_init(void *handle)
return 0;
else if (r)
return r;
+ if (adev->asic_type != CHIP_STONEY) {
+ adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
+ if (adev->acp.acp_genpd == NULL)
+ return -ENOMEM;
- adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
- if (adev->acp.acp_genpd == NULL)
- return -ENOMEM;
-
- adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
- adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
- adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+ adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
+ adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+ adev->acp.acp_genpd->gpd.power_on = acp_poweron;
- adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+ adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
- pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ }
adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
GFP_KERNEL);
@@ -319,14 +320,29 @@ static int acp_hw_init(void *handle)
return -ENOMEM;
}
- i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ }
i2s_pdata[0].cap = DWC_I2S_PLAY;
i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1 |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1;
+ }
- i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
- DW_I2S_QUIRK_COMP_PARAM1;
i2s_pdata[1].cap = DWC_I2S_RECORD;
i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
@@ -373,12 +389,14 @@ static int acp_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
- if (r) {
- dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ if (adev->asic_type != CHIP_STONEY) {
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+ if (r) {
+ dev_err(dev, "Failed to add dev to genpd\n");
+ return r;
+ }
}
}
@@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* return early if no ACP */
- if (!adev->acp.acp_genpd)
+ if (!adev->acp.acp_cell)
return 0;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
- /* If removal fails, dont giveup and try rest */
- if (ret)
- dev_err(dev, "remove dev from genpd failed\n");
+ if (adev->acp.acp_genpd) {
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+ /* If removal fails, don't give up; try the rest */
+ if (ret)
+ dev_err(dev, "remove dev from genpd failed\n");
+ }
+ kfree(adev->acp.acp_genpd);
}
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_genpd);
kfree(adev->acp.acp_cell);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index ef79551b4cb7..57afad79f55d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -30,10 +30,10 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
#include "amd_acpi.h"
#include "atom.h"
-extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
/* Call the ATIF method
*/
/**
@@ -289,7 +289,7 @@ out:
* handles it.
* Returns NOTIFY code
*/
-int amdgpu_atif_handler(struct amdgpu_device *adev,
+static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = &adev->atif;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 37971d9402e3..5432af39a674 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -27,16 +27,15 @@
#include "amdgpu_gfx.h"
#include <linux/module.h>
-const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
-bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
int amdgpu_amdkfd_init(void)
{
int ret;
#if defined(CONFIG_HSA_AMD_MODULE)
- int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+ int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
@@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void)
return ret;
}
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
+void amdgpu_amdkfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
+ const struct kfd2kgd_calls *kfd2kgd;
+
+ if (!kgd2kfd)
+ return;
+
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
@@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
default:
- return false;
+ dev_info(adev->dev, "kfd not supported on this ASIC\n");
+ return;
}
- return true;
-}
-
-void amdgpu_amdkfd_fini(void)
-{
- if (kgd2kfd) {
- kgd2kfd->exit();
- symbol_put(kgd2kfd_init);
- }
-}
-
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
-{
- if (kgd2kfd)
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
}
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
@@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
return -ENOMEM;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
+ &(*mem)->bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 73f83a10ae14..8d689ab7e429 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -26,6 +26,7 @@
#define AMDGPU_AMDKFD_H_INCLUDED
#include <linux/types.h>
+#include <linux/mmu_context.h>
#include <kgd_kfd_interface.h>
struct amdgpu_device;
@@ -39,8 +40,6 @@ struct kgd_mem {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev);
-
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
@@ -62,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+#define read_user_wptr(mmptr, wptr, dst) \
+ ({ \
+ bool valid = false; \
+ if ((mmptr) && (wptr)) { \
+ if ((mmptr) == current->mm) { \
+ valid = !get_user((dst), (wptr)); \
+ } else if (current->mm == NULL) { \
+ use_mm(mmptr); \
+ valid = !get_user((dst), (wptr)); \
+ unuse_mm(mmptr); \
+ } \
+ } \
+ valid; \
+ })
+
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
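
[Editor's note] read_user_wptr() covers three cases: NULL inputs fail, a pointer belonging to the current process is read directly, and a pointer belonging to another process is read from a kernel thread by temporarily adopting that mm. The same logic as a plain function, for readability (illustrative name):

#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

static bool fetch_user_u32(struct mm_struct *mm, u32 __user *ptr, u32 *dst)
{
	bool valid = false;

	if (!mm || !ptr)
		return false;

	if (mm == current->mm) {
		valid = !get_user(*dst, ptr);	/* normal process context */
	} else if (!current->mm) {
		use_mm(mm);			/* kthread: adopt the target mm */
		valid = !get_user(*dst, ptr);
		unuse_mm(mm);
	}
	return valid;
}
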
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5254562fd0f9..b9dbbf9cb8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -39,6 +39,12 @@
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
+
enum {
MAX_TRAPID = 8, /* 3 bits in the bitfield. */
MAX_WATCH_ADDRESSES = 4
@@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+
+/* Because REG_GET_FIELD() is used, this function lives in the
+ * ASIC-specific file.
+ */
+static int get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ return 0;
+}
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
@@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+ .get_fw_version = get_fw_version,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0);
@@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
m = get_mqd(mqd);
- is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
- if (is_wptr_shadow_valid)
- m->cp_hqd_pq_wptr = wptr_shadow;
-
acquire_queue(kgd, pipe_id, queue_id);
- gfx_v7_0_mqd_commit(adev, m);
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (read_user_wptr(mm, wptr, wptr_val))
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
+
release_queue(kgd);
return 0;
@@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+ /* Workaround: if the IQ timer is active and the wait time is close to
+ * or equal to 0, dequeueing is not safe. Wait until either the wait
+ * time is larger or the timer is cleared. Also ensure that IQ_REQ_PEND
+ * is cleared before continuing, and that wait times are set to at
+ * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out\n");
release_queue(kgd);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
release_queue(kgd);
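
The preemption wait also changes shape: instead of decrementing a millisecond budget around msleep(), the code computes an absolute deadline in jiffies once and tests it with time_after(), which stays correct even when the sleep overshoots. The same pattern modeled in standalone C with a monotonic clock:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Deadline-based polling, the same shape as the kgd_hqd_destroy() loop:
 * compute an absolute deadline once, then poll until the condition clears
 * or the deadline passes. hw_queue_active() stands in for the
 * CP_HQD_ACTIVE register read. */
static bool hw_queue_active(void)
{
	static int reads;
	return ++reads < 3; /* pretend the queue drains on the third poll */
}

static double now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
	const unsigned int utimeout_ms = 100;
	double end = now_ms() + utimeout_ms;

	while (hw_queue_active()) {
		if (now_ms() > end) {
			fprintf(stderr, "preemption timed out\n");
			return 1;
		}
		usleep(500); /* mirrors usleep_range(500, 1000) */
	}
	printf("queue drained\n");
	return 0;
}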
@@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(kgd);
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
@@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
+ adev->gfx.me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw->data;
break;
case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
+ adev->sdma.instance[0].fw->data;
break;
case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
+ adev->sdma.instance[1].fw->data;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 133d06671e46..fb6e5dbd5a03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -39,6 +39,12 @@
#include "vi_structs.h"
#include "vid.h"
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
+
struct cik_sdma_rlc_registers;
/*
@@ -55,12 +61,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
@@ -85,6 +94,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+
+/* Because of REG_GET_FIELD() being used, we put this function in the
+ * asic specific file.
+ */
+static int get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ return 0;
+}
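
Note that get_tile_config() does not copy the mode arrays; it publishes pointers into storage that adev keeps owning, along with ARRAY_SIZE() counts, so the KFD side must treat them as borrowed read-only views. A minimal model of that ownership shape (field names follow the diff; the array length is an example):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Borrowed-view config, mirroring struct tile_config: pointer + count
 * into storage the device object keeps owning. */
struct tile_config_view {
	const uint32_t *tile_config_ptr;
	size_t num_tile_configs;
};

static uint32_t tile_mode_array[32]; /* owned by the device object */

static void get_tile_config(struct tile_config_view *cfg)
{
	cfg->tile_config_ptr = tile_mode_array;
	cfg->num_tile_configs = ARRAY_SIZE(tile_mode_array);
}

int main(void)
{
	struct tile_config_view cfg;

	get_tile_config(&cfg);
	printf("%zu tile modes exposed by reference\n", cfg.num_tile_configs);
	return 0;
}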
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
@@ -111,12 +147,15 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+ .get_fw_version = get_fw_version,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -147,7 +186,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0);
@@ -216,7 +255,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
uint32_t mec;
uint32_t pipe;
- mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, 0, 0);
@@ -244,20 +283,67 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
- struct vi_mqd *m;
- uint32_t shadow_wptr, valid_wptr;
struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct vi_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
m = get_mqd(mqd);
- valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
- if (valid_wptr == 0)
- m->cp_hqd_pq_wptr = shadow_wptr;
-
acquire_queue(kgd, pipe_id, queue_id);
- gfx_v8_0_mqd_commit(adev, mqd);
+
+	/* HIQ is set during driver init period with vmid set to 0 */
+ if (m->cp_hqd_vmid == 0) {
+ uint32_t value, mec, pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+ value = RREG32(mmRLC_CP_SCHEDULERS);
+ value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+ ((mec << 5) | (pipe << 3) | queue_id | 0x80));
+ WREG32(mmRLC_CP_SCHEDULERS, value);
+ }
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
+ * This is safe since EOP RPTR==WPTR for any inactive HQD
+ * on ASICs that do not support context-save.
+ * EOP writes/reads can start anywhere in the ring.
+ */
+ if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
+ WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+ WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+ WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+ }
+
+ for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (read_user_wptr(mm, wptr, wptr_val))
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
+
release_queue(kgd);
return 0;
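
For the HIQ (recognizable by vmid 0) the load path additionally programs the scheduler1 field of mmRLC_CP_SCHEDULERS with the queue's location packed as (mec << 5) | (pipe << 3) | queue_id; reading the shifts gives queue in bits 0-2, pipe in bits 3-4 and mec in bits 5-6, while treating the OR'd 0x80 as an enable flag is an assumption. The packing in isolation:

#include <stdint.h>
#include <stdio.h>

/* scheduler1 packing from kgd_hqd_load()'s HIQ branch. The field layout
 * is read off the shifts in the diff; the meaning of 0x80 is assumed. */
static uint32_t hiq_scheduler_entry(uint32_t mec, uint32_t pipe,
				    uint32_t queue_id)
{
	return (mec << 5) | (pipe << 3) | queue_id | 0x80;
}

int main(void)
{
	printf("mec1/pipe0/queue0 -> 0x%02x\n", hiq_scheduler_entry(1, 0, 0));
	return 0;
}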
@@ -308,29 +394,102 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
+ struct vi_mqd *m = get_mqd(mqd);
acquire_queue(kgd, pipe_id, queue_id);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
+
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+ /* Workaround: If IQ timer is active and the wait time is close to or
+ * equal to 0, dequeueing is not safe. Wait until either the wait time
+ * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
+ * cleared before continuing. Also, ensure wait times are set to at
+ * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
release_queue(kgd);
@@ -444,6 +603,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(kgd);
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
@@ -454,42 +623,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
+ adev->gfx.me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw->data;
break;
case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
+ adev->sdma.instance[0].fw->data;
break;
case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
+ adev->sdma.instance[1].fw->data;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2fb299afc12b..63ec1e1bb6aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, &sobj);
+ NULL, 0, &sobj);
if (r) {
goto out_cleanup;
}
@@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
goto out_cleanup;
}
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, &dobj);
+ NULL, 0, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index d324e1c24028..59089e027f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
}
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
@@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = !entry->robj->prime_shared_count;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
gws_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
@@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
+ const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a99e0bca6812..fd435a96481c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
NULL, &placement, NULL,
- &obj);
+ 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
+ r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -659,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
if (CGS_UCODE_ID_CP_MEC == type)
- info->image_size = (header->jt_offset) << 2;
+ info->image_size = le32_to_cpu(header->jt_offset) << 2;
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 33789510e663..269b835571eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
*offset = data->offset;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
amdgpu_bo_unref(&p->uf_entry.robj);
@@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
/* get chunks */
- chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
+ chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
@@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
+ chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
+ cdata = u64_to_user_ptr(user_chunk.chunk_data);
p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
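
u64_to_user_ptr() replaces the repeated (void __user *)(uintptr_t) double cast used for ioctl-supplied 64-bit handles; the kernel helper presumably expands to the same cast with an added type check on the argument. A userspace model of the idiom:

#include <stdint.h>
#include <stdio.h>

/* Model of the u64_to_user_ptr() idiom: a 64-bit handle carried in an
 * ioctl struct is converted to a pointer via uintptr_t, never directly. */
#define u64_to_ptr(x) ((void *)(uintptr_t)(x))

int main(void)
{
	int object = 42;
	uint64_t handle = (uint64_t)(uintptr_t)&object; /* as stored in an ioctl arg */

	printf("%d\n", *(int *)u64_to_ptr(handle));
	return 0;
}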
@@ -246,7 +246,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
}
total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
- used_vram = atomic64_read(&adev->vram_usage);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -292,7 +292,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
/* Do the same for visible VRAM if half of it is free */
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
u64 total_vis_vram = adev->mc.visible_vram_size;
- u64 used_vis_vram = atomic64_read(&adev->vram_vis_usage);
+ u64 used_vis_vram =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
@@ -348,11 +349,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
* that.
*/
if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
- domain = bo->prefered_domains;
+ domain = bo->preferred_domains;
else
domain = bo->allowed_domains;
} else {
- domain = bo->prefered_domains;
+ domain = bo->preferred_domains;
}
} else {
domain = bo->allowed_domains;
@@ -673,10 +674,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
- if (r) {
- amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
+ if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- }
error_free_pages:
@@ -724,21 +723,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 * If an error is set, unvalidate the buffer; otherwise just free the memory
 * used by the parsing context.
**/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
+ bool backoff)
{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
unsigned i;
- if (!error) {
- amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
- } else if (backoff) {
+ else if (backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- }
for (i = 0; i < parser->num_post_dep_syncobjs; i++)
drm_syncobj_put(parser->post_dep_syncobjs[i]);
@@ -791,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (amdgpu_sriov_vf(adev)) {
struct dma_fence *f;
- bo_va = vm->csa_bo_va;
+
+ bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
@@ -828,7 +825,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
+ r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -1038,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_fence_get(p->filp, handle, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, &fence);
if (r)
return r;
@@ -1437,7 +1434,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
if (fences == NULL)
return -ENOMEM;
- fences_user = (void __user *)(uintptr_t)(wait->in.fences);
+ fences_user = u64_to_user_ptr(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
@@ -1490,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr > mapping->last)
continue;
- *bo = lobj->bo_va->bo;
+ *bo = lobj->bo_va->base.bo;
return mapping;
}
@@ -1499,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr > mapping->last)
continue;
- *bo = lobj->bo_va->bo;
+ *bo = lobj->bo_va->base.bo;
return mapping;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6279956e92a4..1a459ac63df4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -336,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->vram_scratch.robj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->vram_scratch.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- return r;
- }
- r = amdgpu_bo_kmap(adev->vram_scratch.robj,
- (void **)&adev->vram_scratch.ptr);
- if (r)
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
-
- return r;
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->vram_scratch.robj,
+ &adev->vram_scratch.gpu_addr,
+ (void **)&adev->vram_scratch.ptr);
}
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->vram_scratch.robj == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->vram_scratch.robj);
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- }
- amdgpu_bo_unref(&adev->vram_scratch.robj);
+ amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
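
amdgpu_vram_scratch_init() collapses the hand-rolled create/reserve/pin/kmap sequence into one amdgpu_bo_create_kernel() call that returns the GPU address and CPU mapping through out-parameters, with amdgpu_bo_free_kernel() as the symmetric teardown. The calling shape, modeled with stand-in types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Shape of the create_kernel/free_kernel pairing. struct scratch and the
 * fake GPU address stand in for the real BO type and the pin/kmap steps. */
struct scratch { uint64_t gpu_addr; void *cpu_ptr; };

static int scratch_init(struct scratch **obj, uint64_t *gpu_addr, void **cpu_ptr)
{
	*obj = calloc(1, sizeof(**obj));
	if (!*obj)
		return -1;
	(*obj)->gpu_addr = 0x1000;           /* would come from pinning */
	(*obj)->cpu_ptr = &(*obj)->gpu_addr; /* would come from kmap */
	*gpu_addr = (*obj)->gpu_addr;
	*cpu_ptr = (*obj)->cpu_ptr;
	return 0;
}

static void scratch_fini(struct scratch **obj)
{
	free(*obj);
	*obj = NULL; /* mirrors amdgpu_bo_free_kernel() NULLing the pointer */
}

int main(void)
{
	struct scratch *obj;
	uint64_t gpu_addr;
	void *cpu_ptr;

	if (scratch_init(&obj, &gpu_addr, &cpu_ptr) == 0)
		printf("scratch at gpu 0x%llx\n", (unsigned long long)gpu_addr);
	scratch_fini(&obj);
	return 0;
}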
/**
@@ -539,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
+ /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
@@ -570,47 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
- if (offset < adev->wb.num_wb) {
- __set_bit(offset, adev->wb.used);
- *wb = offset;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-/**
- * amdgpu_wb_get_64bit - Allocate a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Allocate a wb slot for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
- */
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
-{
- unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
- adev->wb.num_wb, 0, 2, 7, 0);
- if ((offset + 1) < adev->wb.num_wb) {
+ if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- __set_bit(offset + 1, adev->wb.used);
- *wb = offset;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb)
-{
- int i = 0;
- unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
- adev->wb.num_wb, 0, 8, 63, 0);
- if ((offset + 7) < adev->wb.num_wb) {
- for (i = 0; i < 8; i++)
- __set_bit(offset + i, adev->wb.used);
- *wb = offset;
+ *wb = offset * 8; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
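
With the 64-bit and 256-bit variants gone, every bit in the used bitmap now represents one 256-bit (8-dword) slot, and the offset handed back to callers is the bit index times eight. A self-contained model of the allocate/free pairing (the free side must undo the same conversion; MAX_WB is an example size):

#include <stdbool.h>
#include <stdio.h>

/* One bit per 256-bit writeback slot; returned offsets are in dwords,
 * hence the *8 and /8 conversions. */
#define MAX_WB 64

static bool used[MAX_WB];

static int wb_get(unsigned int *wb)
{
	for (unsigned int i = 0; i < MAX_WB; i++) {
		if (!used[i]) {
			used[i] = true;
			*wb = i * 8; /* slot index -> dword offset */
			return 0;
		}
	}
	return -1;
}

static void wb_free(unsigned int wb)
{
	used[wb / 8] = false; /* dword offset -> slot index */
}

int main(void)
{
	unsigned int a, b;

	wb_get(&a);
	wb_get(&b);
	printf("slots at dword offsets %u and %u\n", a, b);
	wb_free(a);
	return 0;
}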
@@ -632,39 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
}
/**
- * amdgpu_wb_free_64bit - Free a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Free a wb slot allocated for use by the driver (all asics)
- */
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
-{
- if ((wb + 1) < adev->wb.num_wb) {
- __clear_bit(wb, adev->wb.used);
- __clear_bit(wb + 1, adev->wb.used);
- }
-}
-
-/**
- * amdgpu_wb_free_256bit - Free a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Free a wb slot allocated for use by the driver (all asics)
- */
-void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb)
-{
- int i = 0;
-
- if ((wb + 7) < adev->wb.num_wb)
- for (i = 0; i < 8; i++)
- __clear_bit(wb + i, adev->wb.used);
-}
-
-/**
* amdgpu_vram_location - try to find VRAM location
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
@@ -1180,6 +1076,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_gtt_size = -1;
}
+ /* valid range is between 4 and 9 inclusive */
+ if (amdgpu_vm_fragment_size != -1 &&
+ (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+ dev_warn(adev->dev, "valid range is between 4 and 9\n");
+ amdgpu_vm_fragment_size = -1;
+ }
+
amdgpu_check_vm_size(adev);
amdgpu_check_block_size(adev);
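
The clamp matches the arithmetic used elsewhere in the series: the fragment size is a power-of-two count of GPU pages, so with AMDGPU_GPU_PAGE_SIZE at 4096 the accepted range 4..9 maps to fragments of 64K up to 2M, exactly as the new module parameter description says. Worked out:

#include <stdio.h>

/* Fragment size in bits -> bytes, as computed for
 * dev_info.pte_fragment_size later in this series. */
#define AMDGPU_GPU_PAGE_SIZE 4096

int main(void)
{
	for (int bits = 4; bits <= 9; bits++)
		printf("vm_fragment_size=%d -> %u KiB\n", bits,
		       ((1u << bits) * AMDGPU_GPU_PAGE_SIZE) / 1024);
	return 0;
}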
@@ -1948,7 +1851,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
- AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_IP_BLOCK_TYPE_VCE
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index cdf2ab20166a..6ad243293a78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
+ drm_gem_object_put_unlocked(amdgpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(amdgpu_fb);
}
@@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 5e9ce8a29669..e39ec981b11c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -68,9 +68,10 @@
* - 3.16.0 - Add reserved vmid support
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
+ * - 3.19.0 - Add support for UVD MJPEG decode
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 18
+#define KMS_DRIVER_MINOR 19
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -94,6 +95,7 @@ unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
+int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
@@ -183,6 +185,9 @@ module_param_named(deep_color, amdgpu_deep_color, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (min 4 = 64K (default), max 9 = 2M)");
+module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0a8ee2411180..9afa9c097e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
@@ -250,7 +250,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo);
- info->screen_base = abo->kptr;
+ info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -280,7 +280,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5cc4987cd887..94c1e2e8e34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -144,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->gart.robj);
+ NULL, NULL, 0, &adev->gart.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 917ac5e074a0..7171968f261e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -59,7 +59,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, &robj);
+ flags, NULL, NULL, 0, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -91,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
@@ -225,9 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED|
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+ AMDGPU_GEM_CREATE_VRAM_CLEARED))
return -EINVAL;
/* reject invalid gem domains */
@@ -263,7 +261,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
return r;
@@ -306,7 +304,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return r;
bo = gem_to_amdgpu_bo(gobj);
- bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
if (r)
@@ -341,7 +339,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
return r;
@@ -355,7 +353,7 @@ unlock_mmap_sem:
up_read(&current->mm->mmap_sem);
release_object:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -374,11 +372,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = amdgpu_bo_mmap_offset(robj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -448,7 +446,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
} else
r = ret;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -491,7 +489,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
unreserve:
amdgpu_bo_unreserve(robj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -623,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+ r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
args->map_size);
if (r)
goto error_backoff;
@@ -643,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+ r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
args->map_size);
if (r)
goto error_backoff;
@@ -664,7 +662,7 @@ error_backoff:
ttm_eu_backoff_reservation(&ticket, &list);
error_unref:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -689,11 +687,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(uintptr_t)args->value;
+ void __user *out = u64_to_user_ptr(args->value);
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
- info.domains = robj->prefered_domains;
+ info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
amdgpu_bo_unreserve(robj);
if (copy_to_user(out, &info, sizeof(info)))
@@ -711,10 +709,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
- robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+ robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
- robj->allowed_domains = robj->prefered_domains;
+ robj->allowed_domains = robj->preferred_domains;
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -726,7 +724,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
}
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -754,7 +752,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 5e6b90c6794f..9e05e257729f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -28,7 +28,7 @@
struct amdgpu_gtt_mgr {
struct drm_mm mm;
spinlock_t lock;
- uint64_t available;
+ atomic64_t available;
};
/**
@@ -54,7 +54,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
- mgr->available = p_size;
+ atomic64_set(&mgr->available, p_size);
man->priv = mgr;
return 0;
}
@@ -153,15 +153,6 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
return r;
}
-void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr = man->priv;
-
- seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
- man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
-
-}
/**
* amdgpu_gtt_mgr_new - allocate a new node
*
@@ -182,11 +173,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
- if (mgr->available < mem->num_pages) {
+ if (atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
- mgr->available -= mem->num_pages;
+ atomic64_sub(mem->num_pages, &mgr->available);
spin_unlock(&mgr->lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -213,9 +204,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
return 0;
err_out:
- spin_lock(&mgr->lock);
- mgr->available += mem->num_pages;
- spin_unlock(&mgr->lock);
+ atomic64_add(mem->num_pages, &mgr->available);
return r;
}
@@ -242,30 +231,47 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
spin_lock(&mgr->lock);
if (node->start != AMDGPU_BO_INVALID_OFFSET)
drm_mm_remove_node(node);
- mgr->available += mem->num_pages;
spin_unlock(&mgr->lock);
+ atomic64_add(mem->num_pages, &mgr->available);
kfree(node);
mem->mm_node = NULL;
}
/**
+ * amdgpu_gtt_mgr_usage - return usage of GTT domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Return how many bytes are used in the GTT domain
+ */
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+}
+
+/**
 * amdgpu_gtt_mgr_debug - dump the GTT allocation table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
 * Dump the table content via the supplied DRM printer.
*/
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
+
+ drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ man->size, (u64)atomic64_read(&mgr->available),
+ amdgpu_gtt_mgr_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
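
Making available an atomic64_t lets amdgpu_gtt_mgr_del() credit pages back without taking the drm_mm spinlock, and the new amdgpu_gtt_mgr_usage() derives usage as (man->size - available) * PAGE_SIZE instead of keeping a second global counter. The accounting modeled with C11 atomics (sizes are example values; the kernel still performs the allocation check under the manager lock):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static const uint64_t total_pages = 1 << 20;
static _Atomic uint64_t available = 1 << 20;

static int gtt_alloc(uint64_t pages)
{
	if (atomic_load(&available) < pages) /* kernel: checked under lock */
		return -1;
	atomic_fetch_sub(&available, pages);
	return 0;
}

static void gtt_free(uint64_t pages)
{
	atomic_fetch_add(&available, pages); /* no lock needed for the credit */
}

static uint64_t gtt_usage_bytes(void)
{
	return (total_pages - atomic_load(&available)) * (uint64_t)PAGE_SIZE;
}

int main(void)
{
	gtt_alloc(256);
	printf("usage: %llu bytes\n", (unsigned long long)gtt_usage_bytes());
	gtt_free(256);
	return 0;
}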
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 09f833255ba1..e16229000a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- amdgpu_amdkfd_load_interface(adev);
amdgpu_amdkfd_device_probe(adev);
amdgpu_amdkfd_device_init(adev);
@@ -456,13 +455,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_usage);
+ ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_vis_usage);
+ ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = atomic64_read(&adev->gtt_usage);
+ ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -498,7 +497,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.vram.total_heap_size = adev->mc.real_vram_size;
mem.vram.usable_heap_size =
adev->mc.real_vram_size - adev->vram_pin_size;
- mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.heap_usage =
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -507,7 +507,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
adev->mc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage =
- atomic64_read(&adev->vram_vis_usage);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
@@ -515,7 +515,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.gtt.total_heap_size *= PAGE_SIZE;
mem.gtt.usable_heap_size = mem.gtt.total_heap_size
- adev->gart_pin_size;
- mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.heap_usage =
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
@@ -589,11 +590,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
- dev_info.pte_fragment_size =
- (1 << AMDGPU_LOG2_PAGES_PER_FRAG(adev)) *
- AMDGPU_GPU_PAGE_SIZE;
+ dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
-
dev_info.cu_active_number = adev->gfx.cu_info.number;
dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size;
@@ -842,7 +840,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm);
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r)
goto out_suspend;
}
@@ -895,8 +893,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
- fpriv->vm.csa_bo_va = NULL;
+ amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+ fpriv->csa_va = NULL;
amdgpu_bo_unreserve(adev->virt.csa_obj);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 38f739fb727b..6558a3ed57a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -359,7 +359,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
head = bo->mn_list.next;
bo->mn = NULL;
- list_del(&bo->mn_list);
+ list_del_init(&bo->mn_list);
if (list_empty(head)) {
struct amdgpu_mn_node *node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3ec43cf9ad78..e7e899190bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,55 +37,6 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-
-
-static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem)
-{
- if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
- return 0;
-
- return ((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
-}
-
-static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
- struct ttm_mem_reg *old_mem,
- struct ttm_mem_reg *new_mem)
-{
- u64 vis_size;
- if (!adev)
- return;
-
- if (new_mem) {
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_add(new_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_add(new_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, new_mem);
- atomic64_add(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
-
- if (old_mem) {
- switch (old_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_sub(old_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_sub(old_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, old_mem);
- atomic64_sub(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
-}
-
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -94,7 +45,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct amdgpu_bo, tbo);
amdgpu_bo_kunmap(bo);
- amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
@@ -220,7 +170,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
}
/**
- * amdgpu_bo_create_kernel - create BO for kernel use
+ * amdgpu_bo_create_reserved - create reserved BO for kernel use
*
* @adev: amdgpu device object
* @size: size for the new BO
@@ -230,24 +180,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
- * Allocates and pins a BO for kernel internal use.
+ * Allocates and pins a BO for kernel internal use, and returns it still
+ * reserved.
*
* Returns 0 on success, negative error code otherwise.
*/
-int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
- unsigned long size, int align,
- u32 domain, struct amdgpu_bo **bo_ptr,
- u64 *gpu_addr, void **cpu_addr)
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
{
+ bool free = false;
int r;
- r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
- return r;
+ if (!*bo_ptr) {
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
+ r);
+ return r;
+ }
+ free = true;
}
r = amdgpu_bo_reserve(*bo_ptr, false);
@@ -270,20 +226,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
}
- amdgpu_bo_unreserve(*bo_ptr);
-
return 0;
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
error_free:
- amdgpu_bo_unref(bo_ptr);
+ if (free)
+ amdgpu_bo_unref(bo_ptr);
return r;
}
/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
+ gpu_addr, cpu_addr);
+
+ if (r)
+ return r;
+
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+}
+
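
amdgpu_bo_create_reserved() only allocates when *bo_ptr comes in NULL and remembers whether it did, so the error path unrefs only BOs it created itself; amdgpu_bo_create_kernel() then shrinks to create_reserved plus the final unreserve. The ownership pattern in isolation, with stand-ins for the BO type and the pin/kmap steps:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* "Allocate only if the caller didn't, free only what we allocated",
 * as in amdgpu_bo_create_reserved(). struct bo and setup_bo() are
 * placeholders for the real BO type and the pin/kmap work. */
struct bo { int dummy; };

static int setup_bo(struct bo *b) { (void)b; return 0; /* pretend pin/kmap */ }

static int create_reserved(struct bo **bo_ptr)
{
	bool allocated = false;

	if (!*bo_ptr) {
		*bo_ptr = calloc(1, sizeof(**bo_ptr));
		if (!*bo_ptr)
			return -1;
		allocated = true;
	}

	if (setup_bo(*bo_ptr) != 0) {
		if (allocated) { /* never free a caller-owned BO */
			free(*bo_ptr);
			*bo_ptr = NULL;
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	struct bo *bo = NULL;

	if (create_reserved(&bo) == 0)
		printf("bo ready (freshly allocated)\n");
	free(bo);
	return 0;
}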
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -318,6 +306,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
@@ -352,13 +341,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
}
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
- bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+ bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU |
AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
- bo->allowed_domains = bo->prefered_domains;
+ bo->allowed_domains = bo->preferred_domains;
if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -418,7 +407,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -470,6 +459,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_CPU_GTT_USWC,
NULL, &placement,
bo->tbo.resv,
+ 0,
&bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
@@ -481,11 +471,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
+/* init_value will only take effect when flags contains
+ * AMDGPU_GEM_CREATE_VRAM_CLEARED.
+ */
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct ttm_placement placement = {0};
@@ -500,7 +494,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
domain, flags, sg, &placement,
- resv, bo_ptr);
+ resv, init_value, bo_ptr);
if (r)
return r;
@@ -562,7 +556,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
if (bo->pin_count)
return 0;
- domain = bo->prefered_domains;
+ domain = bo->preferred_domains;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
@@ -609,16 +603,16 @@ err:
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
- bool is_iomem;
+ void *kptr;
long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
- if (bo->kptr) {
- if (ptr) {
- *ptr = bo->kptr;
- }
+ kptr = amdgpu_bo_kptr(bo);
+ if (kptr) {
+ if (ptr)
+ *ptr = kptr;
return 0;
}
@@ -631,19 +625,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (r)
return r;
- bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr)
- *ptr = bo->kptr;
+ *ptr = amdgpu_bo_kptr(bo);
return 0;
}
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+{
+ bool is_iomem;
+
+ return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
- if (bo->kptr == NULL)
- return;
- bo->kptr = NULL;
- ttm_bo_kunmap(&bo->kmap);
+ if (bo->kmap.bo)
+ ttm_bo_kunmap(&bo->kmap);
}
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
@@ -944,8 +942,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
-
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 833b172a2c2a..a288fa6d72c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,61 @@
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+/* bo virtual addresses in a vm */
+struct amdgpu_bo_va_mapping {
+ struct list_head list;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
+ uint64_t offset;
+ uint64_t flags;
+};
+
+/* User space allocated BO in a VM */
+struct amdgpu_bo_va {
+ struct amdgpu_vm_bo_base base;
+
+ /* protected by bo being reserved */
+ struct dma_fence *last_pt_update;
+ unsigned ref_count;
+
+ /* mappings for this bo_va */
+ struct list_head invalids;
+ struct list_head valids;
+};
+
+struct amdgpu_bo {
+ /* Protected by tbo.reserved */
+ u32 preferred_domains;
+ u32 allowed_domains;
+ struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ u64 flags;
+ unsigned pin_count;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
+ unsigned prime_shared_count;
+	/* list of all virtual addresses this BO is associated with */
+ struct list_head va;
+ /* Constant after initialization */
+ struct drm_gem_object gem_base;
+ struct amdgpu_bo *parent;
+ struct amdgpu_bo *shadow;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ struct amdgpu_mn *mn;
+
+ union {
+ struct list_head mn_list;
+ struct list_head shadow_list;
+ };
+};
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -132,6 +187,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
@@ -139,7 +195,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
@@ -147,6 +208,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index c19c4d138751..f21a7716b90e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -30,6 +30,7 @@ struct cg_flag_name
const char *name;
};
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 6bdc866570ab..5b3f92891f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 15b7149d1204..6c5646b48d1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -184,47 +184,22 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- if (ring->funcs->support_64bit_ptrs) {
- r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
-
- } else {
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
-
+ r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
}
- if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
- r = amdgpu_wb_get_256Bit(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- } else {
- r = amdgpu_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
}
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
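
After this simplification every ring consumes plain 32-bit writeback slots, regardless of pointer width or SRIOV. The ring code then reads them straight out of the device writeback page; a hedged usage sketch, assuming adev->wb.wb holds the CPU view and adev->wb.gpu_addr the GPU base address:

	/* CPU side: poll the value the engine wrote back */
	u32 rptr = ring->adev->wb.wb[ring->rptr_offs];

	/* GPU side: address handed to the engine for the writeback */
	u64 rptr_gpu = ring->adev->wb.gpu_addr + ring->rptr_offs * 4;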
@@ -286,19 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
ring->ready = false;
- if (ring->funcs->support_64bit_ptrs) {
- amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
- } else {
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
- }
+ /* Don't finish a ring that was never initialized */
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
+ return;
+
+ amdgpu_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_wb_free(ring->adev, ring->wptr_offs);
amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX)
- amdgpu_wb_free_256bit(ring->adev, ring->fence_offs);
- else
- amdgpu_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 5ca75a456ad2..3144400435b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]);
r = amdgpu_bo_create(adev, size, align, true, domain,
- 0, NULL, NULL, &sa_manager->bo);
+ 0, NULL, NULL, 0, &sa_manager->bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6899180b265..c586f44312f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ if (dma_fence_is_signaled(f)) {
+ hash_del(&e->node);
+ dma_fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
}
}
- if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
return f;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 3c4d7574d704..ed8c3739015b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, &vram_obj);
+ NULL, NULL, 0, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, gtt_obj + i);
+ NULL, 0, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 509f7a63d40c..1c88bd5e29ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -14,6 +14,62 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
+TRACE_EVENT(amdgpu_ttm_tt_populate,
+ TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
+ TP_ARGS(adev, dma_address, phys_address),
+ TP_STRUCT__entry(
+ __field(uint16_t, domain)
+ __field(uint8_t, bus)
+ __field(uint8_t, slot)
+ __field(uint8_t, func)
+ __field(uint64_t, dma)
+ __field(uint64_t, phys)
+ ),
+ TP_fast_assign(
+ __entry->domain = pci_domain_nr(adev->pdev->bus);
+ __entry->bus = adev->pdev->bus->number;
+ __entry->slot = PCI_SLOT(adev->pdev->devfn);
+ __entry->func = PCI_FUNC(adev->pdev->devfn);
+ __entry->dma = dma_address;
+ __entry->phys = phys_address;
+ ),
+ TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
+ (unsigned)__entry->domain,
+ (unsigned)__entry->bus,
+ (unsigned)__entry->slot,
+ (unsigned)__entry->func,
+ (unsigned long long)__entry->dma,
+ (unsigned long long)__entry->phys)
+);
+
+TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
+ TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
+ TP_ARGS(adev, dma_address, phys_address),
+ TP_STRUCT__entry(
+ __field(uint16_t, domain)
+ __field(uint8_t, bus)
+ __field(uint8_t, slot)
+ __field(uint8_t, func)
+ __field(uint64_t, dma)
+ __field(uint64_t, phys)
+ ),
+ TP_fast_assign(
+ __entry->domain = pci_domain_nr(adev->pdev->bus);
+ __entry->bus = adev->pdev->bus->number;
+ __entry->slot = PCI_SLOT(adev->pdev->devfn);
+ __entry->func = PCI_FUNC(adev->pdev->devfn);
+ __entry->dma = dma_address;
+ __entry->phys = phys_address;
+ ),
+ TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
+ (unsigned)__entry->domain,
+ (unsigned)__entry->bus,
+ (unsigned)__entry->slot,
+ (unsigned)__entry->func,
+ (unsigned long long)__entry->dma,
+ (unsigned long long)__entry->phys)
+);
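+
+/* The two events above differ only in name; the tracepoint machinery
+ * offers DECLARE_EVENT_CLASS()/DEFINE_EVENT() to share one template.
+ * A sketch of how the pair could be deduplicated (the class name
+ * amdgpu_ttm_tt_page_map is hypothetical):
+ *
+ *	DECLARE_EVENT_CLASS(amdgpu_ttm_tt_page_map,
+ *		TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address,
+ *			 uint64_t phys_address),
+ *		TP_ARGS(adev, dma_address, phys_address),
+ *		TP_STRUCT__entry(... as above ...),
+ *		TP_fast_assign(... as above ...),
+ *		TP_printk(... as above ...)
+ *	);
+ *
+ *	DEFINE_EVENT(amdgpu_ttm_tt_page_map, amdgpu_ttm_tt_populate,
+ *		TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address,
+ *			 uint64_t phys_address),
+ *		TP_ARGS(adev, dma_address, phys_address)
+ *	);
+ *
+ *	DEFINE_EVENT(amdgpu_ttm_tt_page_map, amdgpu_ttm_tt_unpopulate,
+ *		TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address,
+ *			 uint64_t phys_address),
+ *		TP_ARGS(adev, dma_address, phys_address)
+ *	);
+ */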
+
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@@ -105,12 +161,12 @@ TRACE_EVENT(amdgpu_bo_create,
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
__entry->type = bo->tbo.mem.mem_type;
- __entry->prefer = bo->prefered_domains;
+ __entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d",
+ TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
__entry->bo, __entry->pages, __entry->type,
__entry->prefer, __entry->allow, __entry->visible)
);
@@ -228,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
),
TP_fast_assign(
- __entry->bo = bo_va ? bo_va->bo : NULL;
+ __entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
@@ -252,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
),
TP_fast_assign(
- __entry->bo = bo_va->bo;
+ __entry->bo = bo_va->base.bo;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e6f9a54c959d..8b2c294f6f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,6 +43,7 @@
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
@@ -662,6 +663,38 @@ release_pages:
return r;
}
+static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
+ for (i = 0; i < ttm->num_pages; i++) {
+ trace_amdgpu_ttm_tt_populate(
+ adev,
+ gtt->ttm.dma_address[i],
+ page_to_phys(ttm->pages[i]));
+ }
+ }
+}
+
+static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
+ for (i = 0; i < ttm->num_pages; i++) {
+ trace_amdgpu_ttm_tt_unpopulate(
+ adev,
+ gtt->ttm.dma_address[i],
+ page_to_phys(ttm->pages[i]));
+ }
+ }
+}
+
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
@@ -688,6 +721,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
+ amdgpu_trace_dma_map(ttm);
+
return 0;
release_sg:
@@ -721,6 +756,8 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
put_page(page);
}
+ amdgpu_trace_dma_unmap(ttm);
+
sg_free_table(ttm->sg);
}
@@ -753,7 +790,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
- int r;
+ int r = 0;
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(ttm);
@@ -892,7 +929,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
@@ -915,14 +952,14 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
- return 0;
+ r = 0;
+ goto trace_mappings;
}
- adev = amdgpu_ttm_adev(ttm->bdev);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
+ r = ttm_dma_populate(&gtt->ttm, adev->dev);
+ goto trace_mappings;
}
#endif
@@ -945,7 +982,12 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return -EFAULT;
}
}
- return 0;
+
+ r = 0;
+trace_mappings:
+ if (likely(!r))
+ amdgpu_trace_dma_map(ttm);
+ return r;
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -966,6 +1008,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
+ amdgpu_trace_dma_unmap(ttm);
+
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1232,23 +1276,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
- r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->stollen_vga_memory);
- if (r) {
- return r;
- }
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+ r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->stolen_vga_memory,
+ NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
- if (r) {
- amdgpu_bo_unref(&adev->stollen_vga_memory);
- return r;
- }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
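
amdgpu_bo_create_kernel() keeps the same create/pin semantics as the open-coded sequence it replaces but returns with the BO unreserved, which is all this path needs since the stolen-VGA buffer is never CPU-mapped here (both address out-pointers are NULL). A plausible relationship to the reserved variant sketched earlier, assuming this composition:

	int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
				    unsigned long size, int align,
				    u32 domain, struct amdgpu_bo **bo_ptr,
				    u64 *gpu_addr, void **cpu_addr)
	{
		int r;

		r = amdgpu_bo_create_reserved(adev, size, align, domain,
					      bo_ptr, gpu_addr, cpu_addr);
		if (r)
			return r;

		amdgpu_bo_unreserve(*bo_ptr);
		return 0;
	}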
@@ -1319,13 +1352,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
amdgpu_ttm_debugfs_fini(adev);
- if (adev->stollen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
+ if (adev->stolen_vga_memory) {
+ r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
if (r == 0) {
- amdgpu_bo_unpin(adev->stollen_vga_memory);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
+ amdgpu_bo_unpin(adev->stolen_vga_memory);
+ amdgpu_bo_unreserve(adev->stolen_vga_memory);
}
- amdgpu_bo_unref(&adev->stollen_vga_memory);
+ amdgpu_bo_unref(&adev->stolen_vga_memory);
}
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
@@ -1509,11 +1542,12 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL */
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1545,7 +1579,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
num_pages -= mm_node->size;
++mm_node;
}
- num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* 10 dwords for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * 10;
/* for IB padding */
num_dw += 64;
@@ -1570,12 +1606,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
+ WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
+
dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
+ dst_addr, 0,
+ cur_size_in_bytes >> 3, 0,
+ src_data);
dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
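
The arithmetic behind the new accounting: SDMA_OP_PTEPDE writes 8-byte entries, so each inner iteration covers cur_size_in_bytes >> 3 entries and costs 10 dwords of IB space. A worked example (numbers illustrative, not from the patch):

	/* a 2 MiB node with max_bytes = 1 MiB: */
	num_loops = DIV_ROUND_UP(2 * 1024 * 1024, 1024 * 1024); /* = 2  */
	num_dw    = num_loops * 10 + 64;                        /* = 84 */
	count     = (1024 * 1024) >> 3;    /* 131072 entries per loop   */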
@@ -1601,32 +1641,16 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
- *man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = *(int *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
- switch (ttm_pl) {
- case TTM_PL_VRAM:
- seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- adev->mman.bdev.man[ttm_pl].size,
- (u64)atomic64_read(&adev->vram_usage) >> 20,
- (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
- break;
- case TTM_PL_TT:
- amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
- break;
- }
+ man->func->debug(man, &p);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index f137c2458ee8..f22a4758719d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,10 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_mem_reg *mem);
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+
+uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
@@ -73,7 +77,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index fcfb9d4f7477..36c763310df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -358,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
(le32_to_cpu(header->jt_offset) * 4);
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
- ucode->ucode_size += le32_to_cpu(header->jt_size) * 4;
-
return 0;
}
@@ -381,7 +379,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo);
+ NULL, NULL, 0, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2ca09f111f08..e19928dae8e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -588,6 +588,10 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
}
break;
+ case 8: /* MJPEG */
+ min_dpb_size = 0;
+ break;
+
case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256);
@@ -1051,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
@@ -1101,7 +1105,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b692ad402252..c855366521ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r, timeout = adev->usec_timeout;
- /* workaround VCE ring test slow issue for sriov*/
+ /* skip ring test for SRIOV */
if (amdgpu_sriov_vf(adev))
- timeout *= 10;
+ return 0;
r = amdgpu_ring_alloc(ring, 16);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 09190fadd228..041e0121590c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (fences == 0) {
if (adev->pm.dpm_enabled) {
+ /* might be used later with PG/CG
amdgpu_dpm_enable_uvd(adev, false);
- } else {
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ */
}
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
@@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks) {
- if (adev->pm.dpm_enabled) {
- amdgpu_dpm_enable_uvd(adev, true);
- } else {
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- }
+ if (set_clocks && adev->pm.dpm_enabled) {
+ /* might be used later with PG/CG
+ amdgpu_dpm_enable_uvd(adev, true);
+ */
}
}
@@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
@@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 8a081e162d13..ab05121b9272 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
* address within META_DATA init package to support SRIOV gfx preemption.
*/
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va)
{
- int r;
- struct amdgpu_bo_va *bo_va;
struct ww_acquire_ctx ticket;
struct list_head list;
struct amdgpu_bo_list_entry pd;
struct ttm_validate_buffer csa_tv;
+ int r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&csa_tv.head);
@@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return r;
}
- bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!bo_va) {
+ *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+ if (!*bo_va) {
ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("failed to create bo_va for static CSA\n");
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
- AMDGPU_CSA_SIZE);
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
- r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
- vm->csa_bo_va = bo_va;
ttm_eu_backoff_reservation(&ticket, &list);
return 0;
}
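
With the bo_va returned through an out parameter instead of being stashed in vm->csa_bo_va, the caller decides where it lives, presumably in per-file-private state. A hedged caller sketch; fpriv->csa_va and the cleanup label are assumed, not taken from this patch:

	/* in the caller, e.g. amdgpu_driver_open_kms() */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto error_vm;	/* hypothetical cleanup label */
	}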
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index e5b1baf387c1..afcfb8bcfb65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -90,7 +90,8 @@ static inline bool is_virtual_machine(void)
struct amdgpu_vm;
int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 250c8e80e646..6b1343e5541d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -159,11 +159,20 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
*/
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
int (*validate)(void *, struct amdgpu_bo *),
- void *param, bool use_cpu_for_update)
+ void *param, bool use_cpu_for_update,
+ struct ttm_bo_global *glob)
{
unsigned i;
int r;
+ if (parent->bo->shadow) {
+ struct amdgpu_bo *shadow = parent->bo->shadow;
+
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ }
+
if (use_cpu_for_update) {
r = amdgpu_bo_kmap(parent->bo, NULL);
if (r)
@@ -183,12 +192,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
if (r)
return r;
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&entry->bo->tbo);
+ if (entry->bo->shadow)
+ ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
+ spin_unlock(&glob->lru_lock);
+
/*
* Recurse into the sub directory. This is harmless because we
* have only a maximum of 5 layers.
*/
r = amdgpu_vm_validate_level(entry, validate, param,
- use_cpu_for_update);
+ use_cpu_for_update, glob);
if (r)
return r;
}
@@ -220,54 +235,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
return amdgpu_vm_validate_level(&vm->root, validate, param,
- vm->use_cpu_for_update);
+ vm->use_cpu_for_update,
+ adev->mman.bdev.glob);
}
/**
- * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
-{
- unsigned i;
-
- if (!parent->entries)
- return;
-
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
-
- if (!entry->bo)
- continue;
-
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- amdgpu_vm_move_level_in_lru(entry);
- }
-}
-
-/**
- * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- amdgpu_vm_move_level_in_lru(&vm->root);
- spin_unlock(&glob->lru_lock);
-}
-
- /**
* amdgpu_vm_alloc_levels - allocate the PD/PT levels
*
* @adev: amdgpu_device pointer
@@ -288,6 +260,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
unsigned pt_idx, from, to;
int r;
u64 flags;
+ uint64_t init_value = 0;
if (!parent->entries) {
unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -321,6 +294,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
+ if (vm->pte_support_ats) {
+ init_value = AMDGPU_PTE_SYSTEM;
+ if (level != adev->vm_manager.num_level - 1)
+ init_value |= AMDGPU_PDE_PTE;
+ }
+
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
struct reservation_object *resv = vm->root.bo->tbo.resv;
@@ -333,7 +312,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, resv, &pt);
+ NULL, resv, init_value, &pt);
if (r)
return r;
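
For Raven's ATS path the freshly allocated page tables must come up with valid system-aperture entries rather than zeros, which is why init_value is threaded into amdgpu_bo_create(). Spelled out with the flag bits (AMDGPU_PTE_SYSTEM is bit 1 per amdgpu_vm.h; AMDGPU_PDE_PTE is defined alongside it):

	/* leaf page table: every PTE falls back to ATS translation */
	init_value = AMDGPU_PTE_SYSTEM;

	/* interior directory: additionally mark entries as PTEs so the
	 * walker stops there and treats the range as a huge mapping */
	if (level != adev->vm_manager.num_level - 1)
		init_value |= AMDGPU_PDE_PTE;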
@@ -352,7 +331,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
entry->bo = pt;
entry->addr = 0;
- entry->huge_page = false;
}
if (level < adev->vm_manager.num_level) {
@@ -892,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
{
struct amdgpu_bo_va *bo_va;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+ if (bo_va->base.vm == vm) {
return bo_va;
}
}
@@ -1060,18 +1038,13 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
shadow = parent->bo->shadow;
if (vm->use_cpu_for_update) {
- pd_addr = (unsigned long)parent->bo->kptr;
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
if (unlikely(r))
return r;
params.func = amdgpu_vm_cpu_set_ptes;
} else {
- if (shadow) {
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
- if (r)
- return r;
- }
ring = container_of(vm->entity.sched, struct amdgpu_ring,
sched);
@@ -1107,22 +1080,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
if (bo == NULL)
continue;
- if (bo->shadow) {
- struct amdgpu_bo *pt_shadow = bo->shadow;
-
- r = amdgpu_ttm_bind(&pt_shadow->tbo,
- &pt_shadow->tbo.mem);
- if (r)
- return r;
- }
-
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
- if (parent->entries[pt_idx].addr == pt ||
- parent->entries[pt_idx].huge_page)
+ /* Don't update huge pages here */
+ if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
+ parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
continue;
- parent->entries[pt_idx].addr = pt;
+ parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -1300,15 +1265,14 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
*
* Check if we can update the PD with a huge page.
*/
-static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
- struct amdgpu_vm_pt *entry,
- struct amdgpu_vm_pt *parent,
- unsigned nptes, uint64_t dst,
- uint64_t flags)
+static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ struct amdgpu_vm_pt *entry,
+ struct amdgpu_vm_pt *parent,
+ unsigned nptes, uint64_t dst,
+ uint64_t flags)
{
bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
uint64_t pd_addr, pde;
- int r;
/* In the case of a mixed PT the PDE must point to it*/
if (p->adev->asic_type < CHIP_VEGA10 ||
@@ -1320,21 +1284,17 @@ static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
dst = amdgpu_gart_get_vm_pde(p->adev, dst);
flags = AMDGPU_PTE_VALID;
} else {
+ /* Set the huge page flag to stop scanning at this PDE */
flags |= AMDGPU_PDE_PTE;
}
- if (entry->addr == dst &&
- entry->huge_page == !!(flags & AMDGPU_PDE_PTE))
- return 0;
+ if (entry->addr == (dst | flags))
+ return;
- entry->addr = dst;
- entry->huge_page = !!(flags & AMDGPU_PDE_PTE);
+ entry->addr = (dst | flags);
if (use_cpu_update) {
- r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr);
- if (r)
- return r;
-
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
} else {
@@ -1347,8 +1307,6 @@ static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
-
- return 0;
}
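
Folding the huge-page flag into entry->addr lets both the dedup check and the later PTE walk use plain bit tests instead of the removed huge_page bool. The resulting invariants, restated:

	/* entry->addr now encodes destination | flags */
	bool is_huge = entry->addr & AMDGPU_PDE_PTE;   /* stop walking here   */
	bool is_same = entry->addr == (dst | flags);   /* PDE already current */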
/**
@@ -1375,7 +1333,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_bo *pt;
unsigned nptes;
bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
- int r;
/* walk over the address space and update the page tables */
for (addr = start; addr < end; addr += nptes,
@@ -1391,17 +1348,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
else
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
- r = amdgpu_vm_handle_huge_pages(params, entry, parent,
- nptes, dst, flags);
- if (r)
- return r;
-
- if (entry->huge_page)
+ amdgpu_vm_handle_huge_pages(params, entry, parent,
+ nptes, dst, flags);
+ /* We don't need to update PTEs for huge pages */
+ if (entry->addr & AMDGPU_PDE_PTE)
continue;
pt = entry->bo;
if (use_cpu_update) {
- pe_start = (unsigned long)pt->kptr;
+ pe_start = (unsigned long)amdgpu_bo_kptr(pt);
} else {
if (pt->shadow) {
pe_start = amdgpu_bo_gpu_offset(pt->shadow);
@@ -1455,9 +1410,7 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Userspace can support this by aligning virtual base address and
* allocation size to the fragment size.
*/
-
- /* SI and newer are optimized for 64KB */
- unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev);
+ unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
uint64_t frag_align = 1 << pages_per_frag;
@@ -1771,7 +1724,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear)
{
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint64_t gtt_flags, flags;
@@ -1780,27 +1734,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct dma_fence *exclusive;
int r;
- if (clear || !bo_va->bo) {
+ if (clear || !bo_va->base.bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->bo->tbo.mem;
+ mem = &bo_va->base.bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->bo->tbo.ttm, struct
- ttm_dma_tt, ttm);
+ ttm = container_of(bo_va->base.bo->tbo.ttm,
+ struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+ exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo_va->bo) {
- flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+ if (bo) {
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+ gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+ adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
flags : 0;
} else {
flags = 0x0;
@@ -1808,7 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->vm_status))
+ if (!list_empty(&bo_va->base.vm_status))
list_splice_init(&bo_va->valids, &bo_va->invalids);
spin_unlock(&vm->status_lock);
@@ -1831,9 +1785,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
- list_del_init(&bo_va->vm_status);
+ list_del_init(&bo_va->base.vm_status);
if (clear)
- list_add(&bo_va->vm_status, &vm->cleared);
+ list_add(&bo_va->base.vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
@@ -1995,15 +1949,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct dma_fence *f = NULL;
int r;
+ uint64_t init_pte_value = 0;
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
+ if (vm->pte_support_ats)
+ init_pte_value = AMDGPU_PTE_SYSTEM;
+
r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
mapping->start, mapping->last,
- 0, 0, &f);
+ init_pte_value, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2023,26 +1981,26 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ * amdgpu_vm_clear_moved - clear moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
*
- * Make sure all invalidated BOs are cleared in the PT.
+ * Make sure all moved BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = NULL;
int r = 0;
spin_lock(&vm->status_lock);
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated,
- struct amdgpu_bo_va, vm_status);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved,
+ struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2082,16 +2040,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
- bo_va->vm = vm;
- bo_va->bo = bo;
+ bo_va->base.vm = vm;
+ bo_va->base.bo = bo;
+ INIT_LIST_HEAD(&bo_va->base.bo_list);
+ INIT_LIST_HEAD(&bo_va->base.vm_status);
+
bo_va->ref_count = 1;
- INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- INIT_LIST_HEAD(&bo_va->vm_status);
if (bo)
- list_add_tail(&bo_va->bo_list, &bo->va);
+ list_add_tail(&bo_va->base.bo_list, &bo->va);
return bo_va;
}
@@ -2116,7 +2075,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
/* validate the parameters */
@@ -2127,7 +2087,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ (bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2137,7 +2097,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
tmp->start, tmp->last + 1);
return -EINVAL;
}
@@ -2182,7 +2142,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
@@ -2194,7 +2155,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ (bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
/* Allocate all the needed memory */
@@ -2202,7 +2163,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+ r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
if (r) {
kfree(mapping);
return r;
@@ -2242,7 +2203,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t saddr)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2390,12 +2351,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
{
struct amdgpu_bo_va_mapping *mapping, *next;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
- list_del(&bo_va->bo_list);
+ list_del(&bo_va->base.bo_list);
spin_lock(&vm->status_lock);
- list_del(&bo_va->vm_status);
+ list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2427,13 +2388,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_vm_bo_base *bo_base;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
- list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
+ list_for_each_entry(bo_base, &bo->va, bo_list) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status,
+ &bo_base->vm->moved);
+ spin_unlock(&bo_base->vm->status_lock);
}
}
@@ -2451,12 +2413,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
}
/**
- * amdgpu_vm_adjust_size - adjust vm size and block size
+ * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
+ *
+ * @adev: amdgpu_device pointer
+ * @fragment_size_default: the default fragment size if it's set auto
+ */
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+{
+ if (amdgpu_vm_fragment_size == -1)
+ adev->vm_manager.fragment_size = fragment_size_default;
+ else
+ adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
+}
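+
+/* fragment_size is the log2 page count of a fragment; it feeds both the
+ * PTE fragment field and the alignment math in amdgpu_vm_frag_ptes().
+ * Worked example, assuming 4 KiB GPU pages:
+ *
+ *	fragment_size = 9:
+ *	frag_align = 1ULL << 9;           // 512 pages
+ *	frag_bytes = frag_align * 4096;   // = 2 MiB per fragment
+ *	frag_flags = AMDGPU_PTE_FRAG(9);  // encoded into the PTE
+ */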
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
{
/* adjust vm size firstly */
if (amdgpu_vm_size == -1)
@@ -2471,8 +2447,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
else
adev->vm_manager.block_size = amdgpu_vm_block_size;
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
+ amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+
+ DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
}
/**
@@ -2494,13 +2473,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amd_sched_rq *rq;
int r, i;
u64 flags;
+ uint64_t init_pde_value = 0;
vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
- INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
@@ -2515,10 +2495,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
- if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+ vm->pte_support_ats = false;
+
+ if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- else
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ vm->pte_support_ats = true;
+ init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+ }
+ } else
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
@@ -2538,7 +2525,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, NULL, &vm->root.bo);
+ NULL, NULL, init_pde_value, &vm->root.bo);
if (r)
goto error_free_sched_entity;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 34d9174ebff2..ba6691b58ee7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -50,11 +50,6 @@ struct amdgpu_bo_list_entry;
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \
- ((adev)->asic_type < CHIP_VEGA10 ? 4 : \
- (adev)->vm_manager.block_size)
-
#define AMDGPU_PTE_VALID (1ULL << 0)
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
@@ -99,11 +94,22 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+ /* constant after initialization */
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo *bo;
+
+ /* protected by bo being reserved */
+ struct list_head bo_list;
+
+ /* protected by spinlock */
+ struct list_head vm_status;
+};
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
- bool huge_page;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
@@ -118,7 +124,7 @@ struct amdgpu_vm {
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
+ struct list_head moved;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
@@ -141,11 +147,12 @@ struct amdgpu_vm {
u64 client_id;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
- /* each VM will map on CSA */
- struct amdgpu_bo_va *csa_bo_va;
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
+
+ /* Flag to indicate whether the PTEs support ATS (GFX9) */
+ bool pte_support_ats;
};
struct amdgpu_vm_id {
@@ -188,6 +195,7 @@ struct amdgpu_vm_manager {
uint32_t num_level;
uint64_t vm_size;
uint32_t block_size;
+ uint32_t fragment_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* vm pte handling */
@@ -220,8 +228,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
@@ -237,8 +243,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
@@ -265,7 +271,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+ uint32_t fragment_size_default);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+ uint32_t fragment_size_default);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a2c59a08b2bd..26e900627971 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -28,6 +28,8 @@
struct amdgpu_vram_mgr {
struct drm_mm mm;
spinlock_t lock;
+ atomic64_t usage;
+ atomic64_t vis_usage;
};
/**
@@ -79,6 +81,27 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
+ * amdgpu_vram_mgr_vis_size - Calculate visible node size
+ *
+ * @adev: amdgpu device structure
+ * @node: MM node structure
+ *
+ * Calculate how many bytes of the MM node are inside visible VRAM
+ */
+static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ struct drm_mm_node *node)
+{
+ uint64_t start = node->start << PAGE_SHIFT;
+ uint64_t end = (node->size + node->start) << PAGE_SHIFT;
+
+ if (start >= adev->mc.visible_vram_size)
+ return 0;
+
+ return (end > adev->mc.visible_vram_size ?
+ adev->mc.visible_vram_size : end) - start;
+}
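+
+/* The clamp only counts the slice of a node below mc.visible_vram_size.
+ * For instance, with 256 MiB of visible VRAM:
+ *
+ *	node [200 MiB, 300 MiB)  ->  min(256, 300) - 200 = 56 MiB
+ *	node [300 MiB, 320 MiB)  ->  start >= 256 MiB    ->  0
+ */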
+
+/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -93,11 +116,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ uint64_t usage = 0, vis_usage = 0;
unsigned i;
int r;
@@ -142,6 +167,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r))
goto error;
+ usage += nodes[i].size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+
/* Calculate a virtual BO start address to easily check if
* everything is CPU accessible.
*/
@@ -155,6 +183,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
}
spin_unlock(&mgr->lock);
+ atomic64_add(usage, &mgr->usage);
+ atomic64_add(vis_usage, &mgr->vis_usage);
+
mem->mm_node = nodes;
return 0;
@@ -181,8 +212,10 @@ error:
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm_node *nodes = mem->mm_node;
+ uint64_t usage = 0, vis_usage = 0;
unsigned pages = mem->num_pages;
if (!mem->mm_node)
@@ -192,31 +225,67 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
while (pages) {
pages -= nodes->size;
drm_mm_remove_node(nodes);
+ usage += nodes->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
++nodes;
}
spin_unlock(&mgr->lock);
+ atomic64_sub(usage, &mgr->usage);
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
kfree(mem->mm_node);
mem->mm_node = NULL;
}
/**
+ * amdgpu_vram_mgr_usage - how many bytes are used in this domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Returns how many bytes are used in this domain.
+ */
+uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ return atomic64_read(&mgr->usage);
+}
+
+/**
+ * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
+ *
+ * @man: TTM memory type manager
+ *
+ * Returns how many bytes are used in the visible part of VRAM
+ */
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ return atomic64_read(&mgr->vis_usage);
+}
+
+/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using printk.
*/
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
+
+ drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
+ man->size, amdgpu_vram_mgr_usage(man) >> 20,
+ amdgpu_vram_mgr_vis_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 490e84944851..4e519dc42916 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2431,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2439,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2473,7 +2473,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 921c6f772f11..11edc75edaa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2506,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2514,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2548,7 +2548,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bcd9521237f4..a51e35f824a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -42,6 +42,7 @@
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
+#include "dce_v6_0.h"
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
@@ -2321,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2329,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2363,7 +2364,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 609438fe8584..9cf14b8b2db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2335,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2343,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2377,7 +2377,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 5ed919e45351..b9ee9073cb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -479,6 +479,8 @@ static int dce_virtual_hw_init(void *handle)
#endif
/* no DCE */
break;
+ case CHIP_VEGA10:
+ break;
default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 4ac85f47f287..d228f5a99044 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
- adev->gfx.rlc.save_restore_obj = NULL;
- }
-
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
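
amdgpu_bo_free_kernel() subsumes the open-coded reserve/unpin/unreserve/unref sequences deleted above, and it also clears the caller's pointer. A rough sketch of what one such call performs, under the assumption that the helper matches the code it replaces (the real implementation lives in amdgpu_object.c and additionally handles optional GPU-address and CPU-pointer outputs):

	static void free_kernel_bo_sketch(struct amdgpu_bo **bo)
	{
		if (*bo == NULL)
			return;

		if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
			amdgpu_bo_unpin(*bo);
			amdgpu_bo_unreserve(*bo);
		}
		amdgpu_bo_unref(bo);	/* drops the ref and NULLs *bo */
	}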
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
@@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
- if (adev->gfx.rlc.save_restore_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL,
- &adev->gfx.rlc.save_restore_obj);
-
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
- dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+ r);
gfx_v6_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
/* write the sr buffer */
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+
amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
@@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
-
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v6_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
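
amdgpu_bo_create_reserved() collapses the create/reserve/pin/kmap chain into one call and returns with the BO still reserved, which is why each converted site ends with amdgpu_bo_kunmap()/amdgpu_bo_unreserve() once the buffer contents are written. The usage pattern, sketched with hypothetical size and data variables:

	u64 gpu_addr;
	void *cpu_addr;
	struct amdgpu_bo *bo = NULL;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, &gpu_addr, &cpu_addr);
	if (r)
		return r;	/* the helper unwinds its own partial state */

	memcpy(cpu_addr, data, size);	/* fill the buffer while mapped */
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);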
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 17b7c6934b0a..00868764a0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
/**
- * gmc_v7_0_init_compute_vmid - gart enable
+ * gfx_v7_0_init_compute_vmid - gart enable
*
* @adev: amdgpu_device pointer
*
@@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
#define DEFAULT_SH_MEM_BASES (0x6000)
#define FIRST_COMPUTE_VMID (8)
#define LAST_COMPUTE_VMID (16)
-static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
uint32_t sh_mem_config;
@@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
+ WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
@@ -1934,12 +1935,11 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_base);
- WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- gmc_v7_0_init_compute_vmid(adev);
+ gfx_v7_0_init_compute_vmid(adev);
WREG32(mmSX_DEBUG_1, 0x20);
@@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
*/
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
- int i, r;
+ int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (ring->mqd_obj) {
- r = amdgpu_bo_reserve(ring->mqd_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
-
- amdgpu_bo_unpin(ring->mqd_obj);
- amdgpu_bo_unreserve(ring->mqd_obj);
-
- amdgpu_bo_unref(&ring->mqd_obj);
- ring->mqd_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
}
}
static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
@@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
/* allocate space for ALL pipes (even the ones we don't own) */
mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
* GFX7_MEC_HPD_SIZE * 2;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v7_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
gfx_v7_0_mec_fini(adev);
return r;
}
@@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
struct cik_mqd *mqd;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
- if (ring->mqd_obj == NULL) {
- r = amdgpu_bo_create(adev,
- sizeof(struct cik_mqd),
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &ring->mqd_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto out;
-
- r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
- &mqd_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
- goto out_unreserve;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+ r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ &mqd_gpu_addr, (void **)&mqd);
if (r) {
- dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
- goto out_unreserve;
+ dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+ return r;
}
mutex_lock(&adev->srbm_mutex);
@@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
mutex_unlock(&adev->srbm_mutex);
amdgpu_bo_kunmap(ring->mqd_obj);
-out_unreserve:
amdgpu_bo_unreserve(ring->mqd_obj);
-out:
return 0;
}
@@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
*/
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- /* save restore block */
- if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
- adev->gfx.rlc.save_restore_obj = NULL;
- }
-
- /* clear state block */
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- /* clear state block */
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
@@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
- if (adev->gfx.rlc.save_restore_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.save_restore_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
- dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
/* write the sr buffer */
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
@@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v7_0_get_csb_buffer(adev, dst_ptr);
@@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.cp_table_size) {
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.cp_table_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 05436b8730b4..832e592fcd07 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- /* clear state block */
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- /* jump table block */
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v8_0_get_csb_buffer(adev, dst_ptr);
@@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.cp_table_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
return r;
}
@@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
@@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v8_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v8_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
- gfx_v8_0_mec_fini(adev);
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
return r;
}
@@ -3802,6 +3707,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
+ WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
+
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
@@ -3825,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
- WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
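
In both the gfx_v7_0 and gfx_v8_0 hunks above, the mmSH_STATIC_MEM_CONFIG write is hoisted out of the per-VMID SRBM loop, presumably because the register is global rather than banked per VMID, so one write before the loop suffices. The resulting shape, sketched for the VI path:

	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);	/* once, globally */

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* only genuinely per-VMID SH_MEM_* registers here */
		WREG32(mmSH_MEM_BASES, sh_mem_base);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);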
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 435db6f5efcf..69182eeca264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] =
SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
+ SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
+ SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
@@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
if (cs_data) {
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create rlc csb bo\n", r);
- gfx_v9_0_rlc_fini(adev);
- return r;
- }
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+ r);
+ gfx_v9_0_rlc_fini(adev);
+ return r;
}
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
@@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_RAVEN) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create cp table bo\n", r);
- gfx_v9_0_rlc_fini(adev);
- return r;
- }
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) failed to create cp table bo\n", r);
+ gfx_v9_0_rlc_fini(adev);
+ return r;
}
rv_init_cp_jump_table(adev);
@@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
- if (adev->gfx.mec.mec_fw_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
- adev->gfx.mec.mec_fw_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
@@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
@@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
- if (adev->gfx.mec.mec_fw_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hdr->header.ucode_size_bytes,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.mec_fw_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
- if (unlikely(r != 0)) {
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.mec_fw_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
+ r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.mec_fw_obj,
+ &adev->gfx.mec.mec_fw_gpu_addr,
+ (void **)&fw);
if (r) {
- dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
+
memcpy(fw, fw_data, fw_size);
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
-
return 0;
}
@@ -2219,7 +2157,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
- int r, i;
+ int r, i, tmp;
/* init the CP */
WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
@@ -2227,7 +2165,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
- r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
+ r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
@@ -2265,6 +2203,12 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, 0x8000);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0);
+
amdgpu_ring_commit(ring);
return 0;
@@ -4158,7 +4102,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
.late_init = gfx_v9_0_late_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index 56ef652a575d..fa5a3fbaf6ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -24,7 +24,6 @@
#ifndef __GFX_V9_0_H__
#define __GFX_V9_0_H__
-extern const struct amd_ip_funcs gfx_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 408723ef157c..4f2788b61a08 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp;
+ uint32_t tmp, field;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
@@ -143,9 +143,10 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
+ field = adev->vm_manager.fragment_size;
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index d2dbb085f480..206e29cad753 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
-extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs;
-extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 93c45f26b7c8..12b0c4cd7a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -461,6 +461,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
+ u32 field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -488,10 +489,12 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL2,
VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
+
+ field = adev->vm_manager.fragment_size;
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
- (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
- (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
+ (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
+ (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
@@ -811,7 +814,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
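
amdgpu_vm_adjust_size() gains a second argument, the default fragment size in bits, and the GART-enable paths now derive the VM_L2_CNTL3 fields from adev->vm_manager.fragment_size instead of hard-coded constants. The encoding, assuming the log2-of-pages convention these values suggest:

	/* fragment_size is log2 of the fragment size in 4 KB pages:
	 *   fragment_size = 4  ->  2^4 * 4 KB = 64 KB  (gmc v6-v8 here)
	 *   fragment_size = 9  ->  2^9 * 4 KB =  2 MB  (gmc v9 below)
	 */
	unsigned int frag_bytes = (1u << adev->vm_manager.fragment_size) * 4096;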
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 4a9e84062874..e42c1ad3af5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -562,7 +562,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
- u32 tmp;
+ u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -592,10 +592,12 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
+
+ field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
@@ -948,7 +950,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 85c937b5e40b..7ca2dae8237a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -762,7 +762,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
- u32 tmp;
+ u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -793,10 +793,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
+
+ field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* XXX: set to enable PTE/PDE in system memory */
tmp = RREG32(mmVM_L2_CNTL4);
@@ -1046,7 +1048,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c22899a08106..2769c2b3b56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -541,9 +541,10 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.vm_size = 1U << 18;
adev->vm_manager.block_size = 9;
adev->vm_manager.num_level = 3;
+ amdgpu_vm_set_fragment_size(adev, 9);
} else {
- /* vm_size is 64GB for legacy 2-level page support*/
- amdgpu_vm_adjust_size(adev, 64);
+ /* vm_size is 64GB for legacy 2-level page support */
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.num_level = 1;
}
break;
@@ -558,14 +559,16 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.vm_size = 1U << 18;
adev->vm_manager.block_size = 9;
adev->vm_manager.num_level = 3;
+ amdgpu_vm_set_fragment_size(adev, 9);
break;
default:
break;
}
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n",
adev->vm_manager.vm_size,
- adev->vm_manager.block_size);
+ adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index ad8def3cc343..4395a4f12149 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp;
+ uint32_t tmp, field;
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
@@ -157,9 +157,10 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ field = adev->vm_manager.fragment_size;
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 57bb940c0ecd..5d38229baf69 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable);
-extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs;
-extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block;
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 591f3e7fb508..fd7c72aaafa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("Setting write pointer\n");
if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
@@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
- adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
+ WRITE_ONCE(*wb, (ring->wptr << 2));
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
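
The wptr shadow update above replaces two 32-bit array stores with a single WRITE_ONCE() of the full 64-bit value, so the SDMA engine polling the shadow can never observe a torn, half-updated write pointer. The idiom, sketched:

	/* Publish a 64-bit write pointer in one store; WRITE_ONCE() keeps
	 * the compiler from splitting or reordering the access. The ring
	 * wptr counts dwords, hence the << 2 to bytes. */
	u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

	WRITE_ONCE(*wb, ring->wptr << 2);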
@@ -573,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
u32 doorbell_offset;
u32 temp;
+ u64 wptr_gpu_addr;
int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -660,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
}
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ if (amdgpu_sriov_vf(adev))
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ else
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -687,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+
}
return 0;
@@ -783,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
- u32 digest_size = 0;
int i, j;
/* halt the MEs */
sdma_v4_0_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- uint16_t version_major;
- uint16_t version_minor;
if (!adev->sdma.instance[i].fw)
return -EINVAL;
@@ -799,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- version_major = le16_to_cpu(hdr->header.header_version_major);
- version_minor = le16_to_cpu(hdr->header.header_version_minor);
-
- if (version_major == 1 && version_minor >= 1) {
- const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr;
- digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size);
- }
-
- fw_size -= digest_size;
-
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
-
for (j = 0; j < fw_size; j++)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 812a24dd1204..8284d5dbfc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1413,6 +1413,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
pitcairn_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
case CHIP_VERDE:
amdgpu_program_register_sequence(adev,
verde_golden_registers,
@@ -1437,6 +1438,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
oland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
case CHIP_HAINAN:
amdgpu_program_register_sequence(adev,
hainan_golden_registers,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e79befd80eed..7f408f85fdb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -250,6 +250,7 @@
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
+#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28)
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_LOAD_CONST_RAM 0x80
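
The new PACKET3_SET_UCONFIG_REG_INDEX_TYPE value (2 in bits 29:28) is ORed into the register-offset dword of a SET_UCONFIG_REG packet; gfx_v9_0_cp_gfx_start() above uses it to program mmVGT_INDEX_TYPE. The three-dword packet, assembled as in that hunk:

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
			  (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) -
			   PACKET3_SET_UCONFIG_REG_START));	/* indexed offset */
	amdgpu_ring_write(ring, 0);	/* register value: index type 0 */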
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958368ac..23a85750edd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r) {
DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
@@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
}
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+
+ /* currently only use the first encoding ring for
+ * SR-IOV, so set an unused location for the other rings.
+ */
+ if (i == 0)
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ else
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
@@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+ WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
+ adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
+ adev->uvd.ring_enc[0].wptr = 0;
+ adev->uvd.ring_enc[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
@@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
return 0;
}
@@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
ring = &adev->uvd.ring;
+ ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
- /* disable clock gating */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
- ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
0xFFFFFFFF, 0x00000004);
/* mc resume*/
@@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
/* mc resume end*/
@@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L));
- /* disable byte swapping */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
-
/* take all subblocks out of reset, except VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
@@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK);
- /* enable UMC */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
-
- /* boot up the VCPU */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
-
/* enable master interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
@@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
/* force RBC into idle state */
size = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
- /* set the write pointer delay */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
-
- /* set the wb address */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
- (upper_32_bits(ring->gpu_addr) >> 2));
-
- /* programm the RB_BASE for ring buffer */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
- lower_32_bits(ring->gpu_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
- upper_32_bits(ring->gpu_addr));
-
- ring->wptr = 0;
ring = &adev->uvd.ring_enc[0];
+ ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
+ /* boot up the VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
+
+ /* enable UMC */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
+
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->uvd_table_size = table_size;
- return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb90c1f..11134d5f7443 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+ WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
+ adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
+ adev->vce.ring[0].wptr = 0;
+ adev->vce.ring[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
@@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
return 0;
}
@@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
- 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
@@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->vce_table_size = table_size;
-
- return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
@@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
ring->use_doorbell = true;
+
+ /* currently only use the first encoding ring for SR-IOV,
+ * so set an unused location for the other rings.
+ */
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
- else if (i == 1)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
{
uint32_t val = 0;
- if (state == AMDGPU_IRQ_STATE_ENABLE)
- val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ }
return 0;
}
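
Under SR-IOV the VCE interrupt enable is now programmed by the host through the MMSCH init table (see the vce_v4_0_sriov_start() hunk above), so the guest-side register write is skipped entirely. The guard pattern:

	/* Skip direct writes to registers the hypervisor owns when
	 * running as an SR-IOV virtual function. */
	if (!amdgpu_sriov_vf(adev)) {
		u32 val = 0;

		if (state == AMDGPU_IRQ_STATE_ENABLE)
			val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
			 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	}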
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6cac291c96da..9ff69b90df36 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6316aad43a73..e4a8c2e52cb2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -142,12 +142,12 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args)
{
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
- pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
- pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
@@ -155,26 +155,26 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
(!access_ok(VERIFY_WRITE,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
- pr_err("kfd: can't access ring base address\n");
+ pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
- pr_err("kfd: ring size must be a power of 2 or 0\n");
+ pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
if (!access_ok(VERIFY_WRITE,
(const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
- pr_err("kfd: can't access read pointer\n");
+ pr_err("Can't access read pointer\n");
return -EFAULT;
}
if (!access_ok(VERIFY_WRITE,
(const void __user *) args->write_pointer_address,
sizeof(uint32_t))) {
- pr_err("kfd: can't access write pointer\n");
+ pr_err("Can't access write pointer\n");
return -EFAULT;
}
@@ -182,7 +182,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
!access_ok(VERIFY_WRITE,
(const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
- pr_debug("kfd: can't access eop buffer");
+ pr_debug("Can't access eop buffer");
return -EFAULT;
}
@@ -190,7 +190,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
!access_ok(VERIFY_WRITE,
(const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
- pr_debug("kfd: can't access ctx save restore buffer");
+ pr_debug("Can't access ctx save restore buffer");
return -EFAULT;
}
@@ -219,27 +219,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
else
q_properties->format = KFD_QUEUE_FORMAT_PM4;
- pr_debug("Queue Percentage (%d, %d)\n",
+ pr_debug("Queue Percentage: %d, %d\n",
q_properties->queue_percent, args->queue_percentage);
- pr_debug("Queue Priority (%d, %d)\n",
+ pr_debug("Queue Priority: %d, %d\n",
q_properties->priority, args->queue_priority);
- pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+ pr_debug("Queue Address: 0x%llX, 0x%llX\n",
q_properties->queue_address, args->ring_base_address);
- pr_debug("Queue Size (0x%llX, %u)\n",
+ pr_debug("Queue Size: 0x%llX, %u\n",
q_properties->queue_size, args->ring_size);
- pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
- (uint64_t) q_properties->read_ptr,
- (uint64_t) q_properties->write_ptr);
+ pr_debug("Queue r/w Pointers: %p, %p\n",
+ q_properties->read_ptr,
+ q_properties->write_ptr);
- pr_debug("Queue Format (%d)\n", q_properties->format);
+ pr_debug("Queue Format: %d\n", q_properties->format);
- pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
+ pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
- pr_debug("Queue CTX save arex (0x%llX)\n",
+ pr_debug("Queue CTX save area: 0x%llX\n",
q_properties->ctx_save_restore_area_address);
return 0;
@@ -257,16 +257,16 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- pr_debug("kfd: creating queue ioctl\n");
+ pr_debug("Creating queue ioctl\n");
err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
+ pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL) {
- pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
+ if (!dev) {
+ pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
return -EINVAL;
}
@@ -278,7 +278,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+ pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -296,15 +296,15 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
+ pr_debug("Queue id %d was created successfully\n", args->queue_id);
- pr_debug("ring buffer address == 0x%016llX\n",
+ pr_debug("Ring buffer address == 0x%016llX\n",
args->ring_base_address);
- pr_debug("read ptr address == 0x%016llX\n",
+ pr_debug("Read ptr address == 0x%016llX\n",
args->read_pointer_address);
- pr_debug("write ptr address == 0x%016llX\n",
+ pr_debug("Write ptr address == 0x%016llX\n",
args->write_pointer_address);
return 0;
@@ -321,7 +321,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("kfd: destroying queue id %d for PASID %d\n",
+ pr_debug("Destroying queue id %d for pasid %d\n",
args->queue_id,
p->pasid);
@@ -341,12 +341,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
struct queue_properties properties;
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
- pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
- pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
@@ -354,12 +354,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
(!access_ok(VERIFY_WRITE,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
- pr_err("kfd: can't access ring base address\n");
+ pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
- pr_err("kfd: ring size must be a power of 2 or 0\n");
+ pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
@@ -368,7 +368,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("kfd: updating queue id %d for PASID %d\n",
+ pr_debug("Updating queue id %d for pasid %d\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -400,7 +400,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
}
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -443,7 +443,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
long status = 0;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -460,12 +460,11 @@ static int kfd_ioctl_dbg_register(struct file *filep,
*/
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- mutex_unlock(&p->mutex);
- mutex_unlock(kfd_get_dbgmgr_mutex());
- return PTR_ERR(pdd);
+ status = PTR_ERR(pdd);
+ goto out;
}
- if (dev->dbgmgr == NULL) {
+ if (!dev->dbgmgr) {
/* In case of a legal call, we have no dbgmgr yet */
create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
if (create_ok) {
@@ -480,6 +479,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
status = -EINVAL;
}
+out:
mutex_unlock(&p->mutex);
mutex_unlock(kfd_get_dbgmgr_mutex());
@@ -494,7 +494,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
long status;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -505,7 +505,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
mutex_lock(kfd_get_dbgmgr_mutex());
status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
- if (status == 0) {
+ if (!status) {
kfd_dbgmgr_destroy(dev->dbgmgr);
dev->dbgmgr = NULL;
}
@@ -539,7 +539,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -580,8 +580,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
- kfree(args_buff);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
watch_mask_value = (uint64_t) args_buff[args_idx];
@@ -604,8 +604,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
}
if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
- kfree(args_buff);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
/* Currently HSA Event is not supported for DBG */
@@ -617,6 +617,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
mutex_unlock(kfd_get_dbgmgr_mutex());
+out:
kfree(args_buff);
return status;
@@ -646,7 +647,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
sizeof(wac_info.trapId);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -782,8 +783,9 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
"scratch_limit %llX\n", pdd->scratch_limit);
args->num_of_nodes++;
- } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
@@ -846,9 +848,84 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
return err;
}
+static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_set_scratch_backing_va_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto bind_process_to_device_fail;
+ }
+
+ pdd->qpd.sh_hidden_private_base = args->va_addr;
+
+ mutex_unlock(&p->mutex);
+
+ if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0)
+ dev->kfd2kgd->set_scratch_backing_va(
+ dev->kgd, args->va_addr, pdd->qpd.vmid);
+
+ return 0;
+
+bind_process_to_device_fail:
+ mutex_unlock(&p->mutex);
+ return err;
+}
+
+static int kfd_ioctl_get_tile_config(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_tile_config_args *args = data;
+ struct kfd_dev *dev;
+ struct tile_config config;
+ int err = 0;
+
+	dev = kfd_device_by_id(args->gpu_id);
+	if (!dev)
+		return -EINVAL;
+
+ dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+
+ args->gb_addr_config = config.gb_addr_config;
+ args->num_banks = config.num_banks;
+ args->num_ranks = config.num_ranks;
+
+ if (args->num_tile_configs > config.num_tile_configs)
+ args->num_tile_configs = config.num_tile_configs;
+ err = copy_to_user((void __user *)args->tile_config_ptr,
+ config.tile_config_ptr,
+ args->num_tile_configs * sizeof(uint32_t));
+ if (err) {
+ args->num_tile_configs = 0;
+ return -EFAULT;
+ }
+
+ if (args->num_macro_tile_configs > config.num_macro_tile_configs)
+ args->num_macro_tile_configs =
+ config.num_macro_tile_configs;
+ err = copy_to_user((void __user *)args->macro_tile_config_ptr,
+ config.macro_tile_config_ptr,
+ args->num_macro_tile_configs * sizeof(uint32_t));
+ if (err) {
+ args->num_macro_tile_configs = 0;
+ return -EFAULT;
+ }
+
+ return 0;
+}
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
- [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+ .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
@@ -899,6 +976,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
kfd_ioctl_dbg_wave_control, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
+ kfd_ioctl_set_scratch_backing_va, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+ kfd_ioctl_get_tile_config, 0)
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
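
/*
 * Illustrative user-space sketch, not part of the patch: the
 * AMDKFD_IOC_GET_TILE_CONFIG handshake added above. The caller reports
 * how many entries its buffers can hold; the kernel clamps the counts
 * to what the GPU exposes and copies back that many entries. The mock
 * struct and handler below stand in for the real uapi types and ioctl.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mock_tile_args {
	uint64_t tile_config_ptr;	/* user buffer address */
	uint32_t num_tile_configs;	/* in: capacity, out: entries copied */
};

/* Kernel-side view: what the GPU actually reports (made-up values). */
static const uint32_t gpu_tile_configs[] = { 0x12, 0x34, 0x56 };

static int mock_get_tile_config(struct mock_tile_args *args)
{
	uint32_t avail = sizeof(gpu_tile_configs) / sizeof(gpu_tile_configs[0]);

	/* Clamp to the smaller of user capacity and GPU-reported count. */
	if (args->num_tile_configs > avail)
		args->num_tile_configs = avail;

	memcpy((void *)(uintptr_t)args->tile_config_ptr, gpu_tile_configs,
	       args->num_tile_configs * sizeof(uint32_t));
	return 0;
}

int main(void)
{
	uint32_t buf[8];
	struct mock_tile_args args = {
		.tile_config_ptr = (uintptr_t)buf,
		.num_tile_configs = 8,
	};

	mock_get_tile_config(&args);
	printf("got %u tile configs\n", args.num_tile_configs);
	return 0;
}
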
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index d5e19b5fbbfb..0aa021aa0aa1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -42,8 +42,6 @@
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
- BUG_ON(!dev || !dev->kfd2kgd);
-
dev->kfd2kgd->address_watch_disable(dev->kgd);
}
@@ -62,7 +60,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
unsigned int *ib_packet_buff;
int status;
- BUG_ON(!dbgdev || !dbgdev->kq || !packet_buff || !size_in_bytes);
+ if (WARN_ON(!size_in_bytes))
+ return -EINVAL;
kq = dbgdev->kq;
@@ -77,8 +76,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
status = kq->ops.acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff);
- if (status != 0) {
- pr_err("amdkfd: acquire_packet_buffer failed\n");
+ if (status) {
+ pr_err("acquire_packet_buffer failed\n");
return status;
}
@@ -115,8 +114,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
&mem_obj);
- if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ if (status) {
+ pr_err("Failed to allocate GART memory\n");
kq->ops.rollback_packet(kq);
return status;
}
@@ -168,8 +167,6 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
{
- BUG_ON(!dbgdev);
-
/*
* no action is needed in this case,
* just make sure diq will not be used
@@ -187,14 +184,12 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
struct kernel_queue *kq = NULL;
int status;
- BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->dev);
-
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
&properties, 0, KFD_QUEUE_TYPE_DIQ,
&qid);
if (status) {
- pr_err("amdkfd: Failed to create DIQ\n");
+ pr_err("Failed to create DIQ\n");
return status;
}
@@ -202,8 +197,8 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
- if (kq == NULL) {
- pr_err("amdkfd: Error getting DIQ\n");
+ if (!kq) {
+ pr_err("Error getting DIQ\n");
pqm_destroy_queue(dbgdev->pqm, qid);
return -EFAULT;
}
@@ -215,8 +210,6 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev)
{
- BUG_ON(!dbgdev || !dbgdev->dev);
-
/* disable watch address */
dbgdev_address_watch_disable_nodiq(dbgdev->dev);
return 0;
@@ -227,8 +220,6 @@ static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
/* todo - disable address watch */
int status;
- BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->kq);
-
status = pqm_destroy_queue(dbgdev->pqm,
dbgdev->kq->queue->properties.queue_id);
dbgdev->kq = NULL;
@@ -245,14 +236,12 @@ static void dbgdev_address_watch_set_registers(
{
union ULARGE_INTEGER addr;
- BUG_ON(!adw_info || !addrHi || !addrLo || !cntl);
-
addr.quad_part = 0;
addrHi->u32All = 0;
addrLo->u32All = 0;
cntl->u32All = 0;
- if (adw_info->watch_mask != NULL)
+ if (adw_info->watch_mask)
cntl->bitfields.mask =
(uint32_t) (adw_info->watch_mask[index] &
ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
@@ -279,7 +268,7 @@ static void dbgdev_address_watch_set_registers(
}
static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
- struct dbg_address_watch_info *adw_info)
+ struct dbg_address_watch_info *adw_info)
{
union TCP_WATCH_ADDR_H_BITS addrHi;
union TCP_WATCH_ADDR_L_BITS addrLo;
@@ -287,13 +276,11 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
struct kfd_process_device *pdd;
unsigned int i;
- BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
-
	/* taking the vmid for that process the safe way, using pdd */
pdd = kfd_get_process_device_data(dbgdev->dev,
adw_info->process);
if (!pdd) {
- pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ pr_err("Failed to get pdd for wave control no DIQ\n");
return -EFAULT;
}
@@ -303,17 +290,16 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
(adw_info->num_watch_points == 0)) {
- pr_err("amdkfd: num_watch_points is invalid\n");
+ pr_err("num_watch_points is invalid\n");
return -EINVAL;
}
- if ((adw_info->watch_mode == NULL) ||
- (adw_info->watch_address == NULL)) {
- pr_err("amdkfd: adw_info fields are not valid\n");
+ if (!adw_info->watch_mode || !adw_info->watch_address) {
+ pr_err("adw_info fields are not valid\n");
return -EINVAL;
}
- for (i = 0 ; i < adw_info->num_watch_points ; i++) {
+ for (i = 0; i < adw_info->num_watch_points; i++) {
dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
&cntl, i, pdd->qpd.vmid);
@@ -348,7 +334,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
}
static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
- struct dbg_address_watch_info *adw_info)
+ struct dbg_address_watch_info *adw_info)
{
struct pm4__set_config_reg *packets_vec;
union TCP_WATCH_ADDR_H_BITS addrHi;
@@ -363,28 +349,25 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
	/* we do not control the vmid in DIQ mode, just a placeholder */
unsigned int vmid = 0;
- BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
-
addrHi.u32All = 0;
addrLo.u32All = 0;
cntl.u32All = 0;
if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
(adw_info->num_watch_points == 0)) {
- pr_err("amdkfd: num_watch_points is invalid\n");
+ pr_err("num_watch_points is invalid\n");
return -EINVAL;
}
- if ((NULL == adw_info->watch_mode) ||
- (NULL == adw_info->watch_address)) {
- pr_err("amdkfd: adw_info fields are not valid\n");
+ if (!adw_info->watch_mode || !adw_info->watch_address) {
+ pr_err("adw_info fields are not valid\n");
return -EINVAL;
}
status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
- if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ if (status) {
+ pr_err("Failed to allocate GART memory\n");
return status;
}
@@ -442,8 +425,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_CNTL);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[0].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
@@ -455,8 +436,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_ADDR_HI);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[1].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[1].reg_data[0] = addrHi.u32All;
@@ -467,8 +446,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_ADDR_LO);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[2].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[2].reg_data[0] = addrLo.u32All;
@@ -485,8 +462,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_CNTL);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[3].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[3].reg_data[0] = cntl.u32All;
@@ -498,8 +473,8 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
packet_buff_uint,
ib_size);
- if (status != 0) {
- pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ if (status) {
+ pr_err("Failed to submit IB to DIQ\n");
break;
}
}
@@ -518,8 +493,6 @@ static int dbgdev_wave_control_set_registers(
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
- BUG_ON(!wac_info || !in_reg_sq_cmd || !in_reg_gfx_index);
-
reg_sq_cmd.u32All = 0;
reg_gfx_index.u32All = 0;
pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2;
@@ -620,18 +593,16 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
struct pm4__set_config_reg *packets_vec;
size_t ib_size = sizeof(struct pm4__set_config_reg) * 3;
- BUG_ON(!dbgdev || !wac_info);
-
reg_sq_cmd.u32All = 0;
status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
&reg_gfx_index);
if (status) {
- pr_err("amdkfd: Failed to set wave control registers\n");
+ pr_err("Failed to set wave control registers\n");
return status;
}
- /* we do not control the VMID in DIQ,so reset it to a known value */
+ /* we do not control the VMID in DIQ, so reset it to a known value */
reg_sq_cmd.bits.vm_id = 0;
pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
@@ -667,7 +638,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ pr_err("Failed to allocate GART memory\n");
return status;
}
@@ -719,8 +690,8 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packet_buff_uint,
ib_size);
- if (status != 0)
- pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ if (status)
+ pr_err("Failed to submit IB to DIQ\n");
kfd_gtt_sa_free(dbgdev->dev, mem_obj);
@@ -735,21 +706,19 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
- BUG_ON(!dbgdev || !dbgdev->dev || !wac_info);
-
reg_sq_cmd.u32All = 0;
	/* taking the VMID for that process the safe way, using PDD */
pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
if (!pdd) {
- pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ pr_err("Failed to get pdd for wave control no DIQ\n");
return -EFAULT;
}
status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
&reg_gfx_index);
if (status) {
- pr_err("amdkfd: Failed to set wave control registers\n");
+ pr_err("Failed to set wave control registers\n");
return status;
}
@@ -818,12 +787,13 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
- * to check which VMID the current process is mapped to. */
+ * to check which VMID the current process is mapped to.
+ */
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
(dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
+ if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
(dev->kgd, vmid) == p->pasid) {
pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
vmid, p->pasid);
@@ -833,7 +803,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
}
if (vmid > last_vmid_to_scan) {
- pr_err("amdkfd: didn't found vmid for pasid (%d)\n", p->pasid);
+ pr_err("Didn't find vmid for pasid %d\n", p->pasid);
return -EFAULT;
}
@@ -860,8 +830,6 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type)
{
- BUG_ON(!pdbgdev || !pdev);
-
pdbgdev->dev = pdev;
pdbgdev->kq = NULL;
pdbgdev->type = type;
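
/*
 * Illustrative sketch, not from the patch: the error-handling idiom the
 * hunks above converge on. BUG_ON() panics the machine on a
 * driver-internal inconsistency; the replacement either drops the check
 * entirely (the callers guarantee the pointers) or warns once and fails
 * recoverably. The WARN_ON() below is a user-space stand-in, written as
 * a GNU C statement expression like the kernel's own macro.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define WARN_ON(cond) \
	({ int __c = !!(cond); \
	   if (__c) \
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	   __c; })

/* Hypothetical submit path: reject a zero-sized IB instead of oopsing. */
static int submit_ib(const void *packet_buff, size_t size_in_bytes)
{
	if (WARN_ON(size_in_bytes == 0))
		return -EINVAL;	/* recoverable, unlike BUG_ON() */

	/* ... acquire packet buffer, copy, submit ... */
	(void)packet_buff;
	return 0;
}

int main(void)
{
	unsigned int ib[4] = { 0 };

	printf("ok: %d, rejected: %d\n",
	       submit_ib(ib, sizeof(ib)), submit_ib(ib, 0));
	return 0;
}
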
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 56d676396342..3da25f7bda6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -44,8 +44,6 @@ struct mutex *kfd_get_dbgmgr_mutex(void)
static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
{
- BUG_ON(!pmgr);
-
kfree(pmgr->dbgdev);
pmgr->dbgdev = NULL;
@@ -55,7 +53,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
{
- if (pmgr != NULL) {
+ if (pmgr) {
kfd_dbgmgr_uninitialize(pmgr);
kfree(pmgr);
}
@@ -66,12 +64,12 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
struct kfd_dbgmgr *new_buff;
- BUG_ON(pdev == NULL);
- BUG_ON(!pdev->init_complete);
+ if (WARN_ON(!pdev->init_complete))
+ return false;
new_buff = kfd_alloc_struct(new_buff);
if (!new_buff) {
- pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
+ pr_err("Failed to allocate dbgmgr instance\n");
return false;
}
@@ -79,7 +77,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
new_buff->dev = pdev;
new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
if (!new_buff->dbgdev) {
- pr_err("amdkfd: Failed to allocate dbgdev instance\n");
+ pr_err("Failed to allocate dbgdev instance\n");
kfree(new_buff);
return false;
}
@@ -96,8 +94,6 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
- BUG_ON(!p || !pmgr || !pmgr->dbgdev);
-
if (pmgr->pasid != 0) {
pr_debug("H/W debugger is already active using pasid %d\n",
pmgr->pasid);
@@ -118,8 +114,6 @@ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
- BUG_ON(!p || !pmgr || !pmgr->dbgdev);
-
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
pr_debug("H/W debugger is not registered by calling pasid %d\n",
@@ -137,8 +131,6 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
struct dbg_wave_control_info *wac_info)
{
- BUG_ON(!pmgr || !pmgr->dbgdev || !wac_info);
-
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n",
@@ -152,9 +144,6 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
struct dbg_address_watch_info *adw_info)
{
- BUG_ON(!pmgr || !pmgr->dbgdev || !adw_info);
-
-
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
index 257a745ad0b5..a04a1fe1d0d9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
@@ -30,13 +30,11 @@
#pragma pack(push, 4)
enum HSA_DBG_WAVEOP {
- HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
- HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
- HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
- HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter
- debug mode */
- HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take
- a trap */
+ HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
+ HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
+ HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
+ HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter dbg mode */
+ HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take a trap */
HSA_DBG_NUM_WAVEOP = 5,
HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
};
@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 {
uint32_t UserData:8; /* user data */
uint32_t ShaderArray:1; /* Shader array */
uint32_t Priv:1; /* Privileged */
- uint32_t Reserved0:4; /* This field is reserved,
- should be 0 */
+ uint32_t Reserved0:4; /* Reserved, should be 0 */
uint32_t WaveId:4; /* wave id */
uint32_t SIMD:2; /* SIMD id */
uint32_t HSACU:4; /* Compute unit */
uint32_t ShaderEngine:2;/* Shader engine */
uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
- uint32_t Reserved1:4; /* This field is reserved,
- should be 0 */
+ uint32_t Reserved1:4; /* Reserved, should be 0 */
} ui32;
uint32_t Value;
};
@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage {
* in the user mode instruction stream. The OS scheduler event is typically
* associated and signaled by an interrupt issued by the GPU, but other HSA
* system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
- * by the KFD by this mechanism, too. */
+ * by the KFD by this mechanism, too.
+ */
/* these are the new definitions for events */
enum HSA_EVENTTYPE {
HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */
HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */
HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change
- (start/stop) */
+ * (start/stop)
+ */
HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */
HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state
- (EOP pm4) */
+ * (EOP pm4)
+ */
/* ... */
HSA_EVENTTYPE_MAXID,
HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3f95f7cb4019..61fff25b4ce7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
#define MQD_SIZE_ALIGNED 768
@@ -98,11 +98,14 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
if (supported_devices[i].did == did) {
- BUG_ON(supported_devices[i].device_info == NULL);
+ WARN_ON(!supported_devices[i].device_info);
return supported_devices[i].device_info;
}
}
+ dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
+ did);
+
return NULL;
}
@@ -114,8 +117,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
const struct kfd_device_info *device_info =
lookup_device_info(pdev->device);
- if (!device_info)
+ if (!device_info) {
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
return NULL;
+ }
kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
if (!kfd)
@@ -152,15 +157,16 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
}
if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
- dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+ dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+ != 0);
return false;
}
pasid_limit = min_t(unsigned int,
- (unsigned int)1 << kfd->device_info->max_pasid_bits,
+ (unsigned int)(1 << kfd->device_info->max_pasid_bits),
iommu_info.max_pasids);
/*
* last pasid is used for kernel queues doorbells
@@ -211,9 +217,8 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
flags);
dev = kfd_device_by_pci_dev(pdev);
- BUG_ON(dev == NULL);
-
- kfd_signal_iommu_event(dev, pasid, address,
+ if (!WARN_ON(!dev))
+ kfd_signal_iommu_event(dev, pasid, address,
flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
return AMD_IOMMU_INV_PRI_RSP_INVALID;
@@ -234,9 +239,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
- size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
- max_num_of_queues_per_device *
- sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+ size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
+ max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ + sizeof(struct pm4_mes_runlist)) * 2;
/* Add size of HIQ & DIQ */
size += KFD_KERNEL_QUEUE_SIZE * 2;
@@ -247,42 +252,37 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
- dev_err(kfd_device,
- "Could not allocate %d bytes for device (%x:%x)\n",
- size, kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
- dev_info(kfd_device,
- "Allocated %d bytes on gart for device(%x:%x)\n",
- size, kfd->pdev->vendor, kfd->pdev->device);
+ dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
/* Initialize GTT sa with 512 byte chunk size */
if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
- dev_err(kfd_device,
- "Error initializing gtt sub-allocator\n");
+ dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
goto kfd_gtt_sa_init_error;
}
- kfd_doorbell_init(kfd);
-
- if (kfd_topology_add_device(kfd) != 0) {
+ if (kfd_doorbell_init(kfd)) {
dev_err(kfd_device,
- "Error adding device (%x:%x) to topology\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ "Error initializing doorbell aperture\n");
+ goto kfd_doorbell_error;
+ }
+
+ if (kfd_topology_add_device(kfd)) {
+ dev_err(kfd_device, "Error adding device to topology\n");
goto kfd_topology_add_device_error;
}
if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
}
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
- "Error initializing iommuv2 for device (%x:%x)\n",
+ "Error initializing iommuv2 for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
goto device_iommu_pasid_error;
}
@@ -292,15 +292,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
- dev_err(kfd_device,
- "Error initializing queue manager for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
- if (kfd->dqm->ops.start(kfd->dqm) != 0) {
+ if (kfd->dqm->ops.start(kfd->dqm)) {
dev_err(kfd_device,
- "Error starting queuen manager for device (%x:%x)\n",
+ "Error starting queue manager for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
goto dqm_start_error;
}
@@ -308,10 +306,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->dbgmgr = NULL;
kfd->init_complete = true;
- dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+ dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
kfd->pdev->device);
- pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+ pr_debug("Starting kfd with the following scheduling policy %d\n",
sched_policy);
goto out;
@@ -325,11 +323,13 @@ device_iommu_pasid_error:
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
+ kfd_doorbell_fini(kfd);
+kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
- "device (%x:%x) NOT added due to errors\n",
+ "device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
out:
return kfd->init_complete;
@@ -342,6 +342,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
amd_iommu_free_device(kfd->pdev);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
+ kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
@@ -351,8 +352,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
- BUG_ON(kfd == NULL);
-
if (kfd->init_complete) {
kfd->dqm->ops.stop(kfd->dqm);
amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
@@ -366,14 +365,15 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
unsigned int pasid_limit;
int err;
- BUG_ON(kfd == NULL);
-
pasid_limit = kfd_get_pasid_limit();
if (kfd->init_complete) {
err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0)
+ if (err < 0) {
+ dev_err(kfd_device, "failed to initialize iommu\n");
return -ENXIO;
+ }
+
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
@@ -402,26 +402,27 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
- unsigned int num_of_bits;
+ unsigned int num_of_longs;
- BUG_ON(!kfd);
- BUG_ON(!kfd->gtt_mem);
- BUG_ON(buf_size < chunk_size);
- BUG_ON(buf_size == 0);
- BUG_ON(chunk_size == 0);
+ if (WARN_ON(buf_size < chunk_size))
+ return -EINVAL;
+ if (WARN_ON(buf_size == 0))
+ return -EINVAL;
+ if (WARN_ON(chunk_size == 0))
+ return -EINVAL;
kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
- num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
- BUG_ON(num_of_bits == 0);
+ num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
+ BITS_PER_LONG;
- kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+ kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
if (!kfd->gtt_sa_bitmap)
return -ENOMEM;
- pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+ pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
mutex_init(&kfd->gtt_sa_lock);
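
/*
 * Illustrative sketch, not from the patch: the bitmap sizing switch in
 * kfd_gtt_sa_init() above. With N chunks the tracking bitmap needs
 * ceil(N / BITS_PER_LONG) longs; the old byte-based sizing
 * (N / BITS_PER_BYTE) rounded down and under-allocated for small N.
 * Everything below is a self-contained user-space demo.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long *alloc_chunk_bitmap(unsigned int num_of_chunks)
{
	/* Round up so even a single chunk gets one long of bitmap. */
	size_t num_of_longs =
		(num_of_chunks + BITS_PER_LONG - 1) / BITS_PER_LONG;

	return calloc(num_of_longs, sizeof(unsigned long));
}

int main(void)
{
	unsigned long *bitmap = alloc_chunk_bitmap(3);

	if (!bitmap)
		return 1;
	/* Old sizing would compute 3 / BITS_PER_BYTE == 0 bytes. */
	printf("new sizing allocates %zu bytes for 3 chunks\n",
	       sizeof(unsigned long));
	free(bitmap);
	return 0;
}
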
@@ -455,8 +456,6 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
{
unsigned int found, start_search, cur_size;
- BUG_ON(!kfd);
-
if (size == 0)
return -EINVAL;
@@ -467,7 +466,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if ((*mem_obj) == NULL)
return -ENOMEM;
- pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+ pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
start_search = 0;
@@ -479,7 +478,7 @@ kfd_gtt_restart_search:
kfd->gtt_sa_num_of_chunks,
start_search);
- pr_debug("kfd: found = %d\n", found);
+ pr_debug("Found = %d\n", found);
/* If there wasn't any free chunk, bail out */
if (found == kfd->gtt_sa_num_of_chunks)
@@ -497,12 +496,12 @@ kfd_gtt_restart_search:
found,
kfd->gtt_sa_chunk_size);
- pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+ pr_debug("gpu_addr = %p, cpu_addr = %p\n",
(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
- pr_debug("kfd: single bit\n");
+ pr_debug("Single bit\n");
set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
@@ -537,7 +536,7 @@ kfd_gtt_restart_search:
} while (cur_size > 0);
- pr_debug("kfd: range_start = %d, range_end = %d\n",
+ pr_debug("range_start = %d, range_end = %d\n",
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
@@ -551,7 +550,7 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(mem_obj);
return -ENOMEM;
@@ -561,13 +560,11 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
unsigned int bit;
- BUG_ON(!kfd);
-
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
- pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+ pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
mem_obj, mem_obj->range_start, mem_obj->range_end);
mutex_lock(&kfd->gtt_sa_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 42de22bbe14c..53a66e821624 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -79,20 +79,17 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.num_pipe_per_mec;
}
@@ -121,7 +118,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* Kaveri kfd vmid's starts from vmid 8 */
allocated_vmid = bit + KFD_VMID_START_OFFSET;
- pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+ pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -152,42 +149,38 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
-
- pr_debug("kfd: In func %s\n", __func__);
print_queue(q);
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
- return -EPERM;
+ retval = -EPERM;
+ goto out_unlock;
}
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
- if (retval != 0) {
- mutex_unlock(&dqm->lock);
- return retval;
- }
+ if (retval)
+ goto out_unlock;
}
*allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
retval = create_sdma_queue_nocpsch(dqm, q, qpd);
+ else
+ retval = -EINVAL;
- if (retval != 0) {
+ if (retval) {
if (list_empty(&qpd->queues_list)) {
deallocate_vmid(dqm, qpd, q);
*allocated_vmid = 0;
}
- mutex_unlock(&dqm->lock);
- return retval;
+ goto out_unlock;
}
list_add(&q->list, &qpd->queues_list);
@@ -205,8 +198,9 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
+out_unlock:
mutex_unlock(&dqm->lock);
- return 0;
+ return retval;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
@@ -216,7 +210,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
set = false;
- for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+ for (pipe = dqm->next_pipe_to_allocate, i = 0;
+ i < get_pipes_per_mec(dqm);
pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
if (!is_pipe_enabled(dqm, 0, pipe))
@@ -239,8 +234,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
if (!set)
return -EBUSY;
- pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
- __func__, q->pipe, q->queue);
+ pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
/* horizontal hqd allocation */
dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
@@ -260,36 +254,38 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !qpd);
-
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (mqd == NULL)
+ if (!mqd)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
- if (retval != 0)
+ if (retval)
return retval;
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0) {
- deallocate_hqd(dqm, q);
- return retval;
- }
+ if (retval)
+ goto out_deallocate_hqd;
- pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
- q->pipe,
- q->queue);
+ pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+ q->pipe, q->queue);
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
- q->queue, (uint32_t __user *) q->properties.write_ptr);
- if (retval != 0) {
- deallocate_hqd(dqm, q);
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
- return retval;
- }
+ dqm->dev->kfd2kgd->set_scratch_backing_va(
+ dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+ goto out_uninit_mqd;
return 0;
+
+out_uninit_mqd:
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_hqd:
+ deallocate_hqd(dqm, q);
+
+ return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
@@ -299,12 +295,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !q->mqd || !qpd);
-
retval = 0;
- pr_debug("kfd: In Func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -323,7 +315,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id);
} else {
- pr_debug("q->properties.type is invalid (%d)\n",
+ pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
retval = -EINVAL;
goto out;
@@ -334,7 +326,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
q->pipe, q->queue);
- if (retval != 0)
+ if (retval)
goto out;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -364,14 +356,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
struct mqd_manager *mqd;
bool prev_active = false;
- BUG_ON(!dqm || !q || !q->mqd);
-
mutex_lock(&dqm->lock);
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (mqd == NULL) {
- mutex_unlock(&dqm->lock);
- return -ENOMEM;
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_unlock;
}
if (q->properties.is_active)
@@ -385,12 +375,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if ((q->properties.is_active) && (!prev_active))
dqm->queue_count++;
- else if ((!q->properties.is_active) && (prev_active))
+ else if (!q->properties.is_active && prev_active)
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = execute_queues_cpsch(dqm, false);
+out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
@@ -400,15 +391,16 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
{
struct mqd_manager *mqd;
- BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+ pr_debug("mqd type %d\n", type);
mqd = dqm->mqds[type];
if (!mqd) {
mqd = mqd_manager_init(type, dqm->dev);
- if (mqd == NULL)
- pr_err("kfd: mqd manager is NULL");
+ if (!mqd)
+			pr_err("mqd manager is NULL\n");
dqm->mqds[type] = mqd;
}
@@ -421,11 +413,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *n;
int retval;
- BUG_ON(!dqm || !qpd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
@@ -449,10 +437,6 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
int retval;
struct device_process_node *cur, *next;
- BUG_ON(!dqm || !qpd);
-
- pr_debug("In func %s\n", __func__);
-
pr_debug("qpd->queues_list is %s\n",
list_empty(&qpd->queues_list) ? "empty" : "not empty");
@@ -493,51 +477,39 @@ static void init_interrupts(struct device_queue_manager *dqm)
{
unsigned int i;
- BUG_ON(dqm == NULL);
-
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
if (is_pipe_enabled(dqm, 0, i))
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
-static int init_scheduler(struct device_queue_manager *dqm)
-{
- int retval = 0;
-
- BUG_ON(!dqm);
-
- pr_debug("kfd: In %s\n", __func__);
-
- return retval;
-}
-
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
- int i;
+ int pipe, queue;
- BUG_ON(!dqm);
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_per_mec(dqm));
+ dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!dqm->allocated_queues)
+ return -ENOMEM;
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
- dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
- sizeof(unsigned int), GFP_KERNEL);
- if (!dqm->allocated_queues) {
- mutex_destroy(&dqm->lock);
- return -ENOMEM;
- }
- for (i = 0; i < get_pipes_per_mec(dqm); i++)
- dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
+
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
+ if (test_bit(pipe_offset + queue,
+ dqm->dev->shared_resources.queue_bitmap))
+ dqm->allocated_queues[pipe] |= 1 << queue;
+ }
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
- init_scheduler(dqm);
return 0;
}
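
/*
 * Illustrative sketch, not from the patch: how the new
 * initialize_nocpsch() loop above derives each pipe's HQD mask from the
 * flat shared queue bitmap, instead of assuming every queue of every
 * pipe belongs to KFD. The sizes and bitmap value are made up.
 */
#include <stdio.h>

#define PIPES_PER_MEC	4
#define QUEUES_PER_PIPE	8

int main(void)
{
	/* Bit i set => queue i (flat index across pipes) is KFD's. */
	unsigned long queue_bitmap = 0x00ff00f0; /* hypothetical layout */
	unsigned int allocated_queues[PIPES_PER_MEC] = { 0 };
	int pipe, queue;

	for (pipe = 0; pipe < PIPES_PER_MEC; pipe++) {
		int pipe_offset = pipe * QUEUES_PER_PIPE;

		for (queue = 0; queue < QUEUES_PER_PIPE; queue++)
			if (queue_bitmap & (1UL << (pipe_offset + queue)))
				allocated_queues[pipe] |= 1 << queue;

		printf("pipe %d mask 0x%02x\n", pipe, allocated_queues[pipe]);
	}
	return 0;
}
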
@@ -545,9 +517,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
int i;
- BUG_ON(!dqm);
-
- BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -604,33 +574,34 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
- if (retval != 0)
+ if (retval)
return retval;
q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
- pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
- pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
- pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+ pr_debug("SDMA id is: %d\n", q->sdma_id);
+ pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0) {
- deallocate_sdma_queue(dqm, q->sdma_id);
- return retval;
- }
+ if (retval)
+ goto out_deallocate_sdma_queue;
- retval = mqd->load_mqd(mqd, q->mqd, 0,
- 0, NULL);
- if (retval != 0) {
- deallocate_sdma_queue(dqm, q->sdma_id);
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
- return retval;
- }
+ retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ if (retval)
+ goto out_uninit_mqd;
return 0;
+
+out_uninit_mqd:
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_sdma_queue:
+ deallocate_sdma_queue(dqm, q->sdma_id);
+
+ return retval;
}
/*
@@ -642,10 +613,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- BUG_ON(!dqm);
-
- pr_debug("kfd: In func %s\n", __func__);
-
res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
res.vmid_mask <<= KFD_VMID_START_OFFSET;
@@ -663,7 +630,8 @@ static int set_sched_resources(struct device_queue_manager *dqm)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
- * definition of res.queue_mask needs updating */
+ * definition of res.queue_mask needs updating
+ */
if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
pr_err("Invalid queue enabled by amdgpu: %d\n", i);
break;
@@ -674,9 +642,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
res.gws_mask = res.oac_mask = res.gds_heap_base =
res.gds_heap_size = 0;
- pr_debug("kfd: scheduling resources:\n"
- " vmid mask: 0x%8X\n"
- " queue mask: 0x%8llX\n",
+ pr_debug("Scheduling resources:\n"
+ "vmid mask: 0x%8X\n"
+ "queue mask: 0x%8llX\n",
res.vmid_mask, res.queue_mask);
return pm_send_set_resources(&dqm->packets, &res);
@@ -686,10 +654,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
int retval;
- BUG_ON(!dqm);
-
- pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_per_mec(dqm));
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
@@ -697,13 +662,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
retval = dqm->ops_asic_specific.initialize(dqm);
- if (retval != 0)
- goto fail_init_pipelines;
-
- return 0;
+ if (retval)
+ mutex_destroy(&dqm->lock);
-fail_init_pipelines:
- mutex_destroy(&dqm->lock);
return retval;
}
@@ -712,25 +673,23 @@ static int start_cpsch(struct device_queue_manager *dqm)
struct device_process_node *node;
int retval;
- BUG_ON(!dqm);
-
retval = 0;
retval = pm_init(&dqm->packets, dqm);
- if (retval != 0)
+ if (retval)
goto fail_packet_manager_init;
retval = set_sched_resources(dqm);
- if (retval != 0)
+ if (retval)
goto fail_set_sched_resources;
- pr_debug("kfd: allocating fence memory\n");
+ pr_debug("Allocating fence memory\n");
/* allocate fence memory on the gart */
retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
&dqm->fence_mem);
- if (retval != 0)
+ if (retval)
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
@@ -758,8 +717,6 @@ static int stop_cpsch(struct device_queue_manager *dqm)
struct device_process_node *node;
struct kfd_process_device *pdd;
- BUG_ON(!dqm);
-
destroy_queues_cpsch(dqm, true, true);
list_for_each_entry(node, &dqm->queues, list) {
@@ -776,13 +733,9 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- BUG_ON(!dqm || !kq || !qpd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
@@ -809,10 +762,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- BUG_ON(!dqm || !kq);
-
- pr_debug("kfd: In %s\n", __func__);
-
mutex_lock(&dqm->lock);
/* here we actually preempt the DIQ */
destroy_queues_cpsch(dqm, true, false);
@@ -844,8 +793,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !qpd);
-
retval = 0;
if (allocate_vmid)
@@ -854,7 +801,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out;
@@ -866,15 +813,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (mqd == NULL) {
- mutex_unlock(&dqm->lock);
- return -ENOMEM;
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out;
}
dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0)
+ if (retval)
goto out;
list_add(&q->list, &qpd->queues_list);
@@ -884,7 +831,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
+ dqm->sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -903,12 +850,11 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
- BUG_ON(!fence_addr);
timeout += jiffies;
while (*fence_addr != fence_value) {
if (time_after(jiffies, timeout)) {
- pr_err("kfd: qcm fence wait loop timeout expired\n");
+ pr_err("qcm fence wait loop timeout expired\n");
return -ETIME;
}
schedule();
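
/*
 * Illustrative user-space sketch, not from the patch, of the
 * amdkfd_fence_wait_timeout() pattern above: poll a memory location the
 * scheduler firmware writes, yield each iteration, and give up with
 * -ETIME once the deadline passes. time() stands in for jiffies here.
 */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <time.h>

static int fence_wait_timeout(volatile unsigned int *fence_addr,
			      unsigned int fence_value,
			      unsigned int timeout_sec)
{
	time_t deadline = time(NULL) + timeout_sec;

	while (*fence_addr != fence_value) {
		if (time(NULL) > deadline) {
			fprintf(stderr, "fence wait loop timeout expired\n");
			return -ETIME;
		}
		sched_yield();	/* let the writer make progress */
	}
	return 0;
}

int main(void)
{
	volatile unsigned int fence = 2; /* pretend the GPU already signaled */

	printf("wait -> %d\n", fence_wait_timeout(&fence, 2, 1));
	return 0;
}
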
@@ -932,8 +878,6 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_preempt_type_filter preempt_type;
struct kfd_process_device *pdd;
- BUG_ON(!dqm);
-
retval = 0;
if (lock)
@@ -941,7 +885,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
goto out;
- pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+	pr_debug("Before destroying queues, sdma queue count is %u\n",
dqm->sdma_queue_count);
if (dqm->sdma_queue_count > 0) {
@@ -955,7 +899,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
preempt_type, 0, false, 0);
- if (retval != 0)
+ if (retval)
goto out;
*dqm->fence_addr = KFD_FENCE_INIT;
@@ -964,7 +908,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
- if (retval != 0) {
+ if (retval) {
pdd = kfd_get_process_device_data(dqm->dev,
kfd_get_process(current));
pdd->reset_wavefronts = true;
@@ -983,14 +927,12 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
int retval;
- BUG_ON(!dqm);
-
if (lock)
mutex_lock(&dqm->lock);
retval = destroy_queues_cpsch(dqm, false, false);
- if (retval != 0) {
- pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+ if (retval) {
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
goto out;
}
@@ -1005,8 +947,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
}
retval = pm_send_runlist(&dqm->packets, &dqm->queues);
- if (retval != 0) {
- pr_err("kfd: failed to execute runlist");
+ if (retval) {
+		pr_err("Failed to execute runlist\n");
goto out;
}
dqm->active_runlist = true;
@@ -1025,8 +967,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd;
bool preempt_all_queues;
- BUG_ON(!dqm || !qpd || !q);
-
preempt_all_queues = false;
retval = 0;
@@ -1098,8 +1038,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
{
bool retval;
- pr_debug("kfd: In func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (alternate_aperture_size == 0) {
@@ -1120,14 +1058,11 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
uint64_t base = (uintptr_t)alternate_aperture_base;
uint64_t limit = base + alternate_aperture_size - 1;
- if (limit <= base)
- goto out;
-
- if ((base & APE1_FIXED_BITS_MASK) != 0)
- goto out;
-
- if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
goto out;
+ }
qpd->sh_mem_ape1_base = base >> 16;
qpd->sh_mem_ape1_limit = limit >> 16;
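Assuming CIK's APE1_FIXED_BITS_MASK (0xFFFF80000000FFFFULL) and APE1_LIMIT_ALIGNMENT (0xFFFF), the combined check above only admits 64 KiB-aligned apertures whose limit ends on a 64 KiB boundary. A worked example (values hypothetical):

        /* A 64 KiB aperture at 4 GiB:
         *   base  = 0x100000000              base & mask == 0: passes
         *   limit = base + 0x10000 - 1
         *         = 0x10000FFFF              limit & mask == 0xFFFF: passes
         * The registers then hold 64 KiB granules:
         *   sh_mem_ape1_base  = base  >> 16 = 0x10000
         *   sh_mem_ape1_limit = limit >> 16 = 0x10000
         */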
@@ -1144,27 +1079,22 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
- pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+ pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
qpd->sh_mem_config, qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit);
- mutex_unlock(&dqm->lock);
- return retval;
-
out:
mutex_unlock(&dqm->lock);
- return false;
+ return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
struct device_queue_manager *dqm;
- BUG_ON(!dev);
+ pr_debug("Loading device queue manager\n");
- pr_debug("kfd: loading device queue manager\n");
-
- dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
+ dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
if (!dqm)
return NULL;
@@ -1202,8 +1132,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
default:
- BUG();
- break;
+ pr_err("Invalid scheduling policy %d\n", sched_policy);
+ goto out_free;
}
switch (dev->device_info->asic_family) {
@@ -1216,18 +1146,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
break;
}
- if (dqm->ops.initialize(dqm) != 0) {
- kfree(dqm);
- return NULL;
- }
+ if (!dqm->ops.initialize(dqm))
+ return dqm;
- return dqm;
+out_free:
+ kfree(dqm);
+ return NULL;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
-
dqm->ops.uninitialize(dqm);
kfree(dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 48dc0561b402..72c3cbabc0a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -24,6 +24,7 @@
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
#include "oss/oss_2_4_sh_mask.h"
+#include "gca/gfx_7_2_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -65,7 +66,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM.
*/
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) |
@@ -104,8 +105,6 @@ static int register_process_cik(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
unsigned int temp;
- BUG_ON(!dqm || !qpd);
-
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
@@ -125,9 +124,10 @@ static int register_process_cik(struct device_queue_manager *dqm,
} else {
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
}
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
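For illustration, assuming cik_regs.h encodes PRIVATE_BASE(x) as (x) << 0 and SHARED_BASE(x) as (x) << 16, an even nybble such as 0x8 packs into SH_MEM_BASES as follows (a sketch, not driver code):

        /* top_address_nybble must be even, nonzero and <= 0xE (hence the
         * WARN_ON above). For nybble 0x8:
         *   PRIVATE_BASE(0x8 << 12) = 0x8000          bits 15:0
         *   SHARED_BASE(0x8 << 12)  = 0x8000 << 16    bits 31:16
         * giving sh_mem_bases = 0x80008000.
         */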
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 7e9cae9d349b..40e9ddd096cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM.
*/
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return top_address_nybble << 12 |
@@ -110,8 +110,6 @@ static int register_process_vi(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
unsigned int temp;
- BUG_ON(!dqm || !qpd);
-
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
@@ -137,9 +135,11 @@ static int register_process_vi(struct device_queue_manager *dqm,
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ qpd->sh_mem_config |= 1 <<
+ SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
}
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 453c5d66e5c3..acf4d2a977ad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -59,7 +59,7 @@ static inline size_t doorbell_process_allocation(void)
}
/* Doorbell calculations for device init. */
-void kfd_doorbell_init(struct kfd_dev *kfd)
+int kfd_doorbell_init(struct kfd_dev *kfd)
{
size_t doorbell_start_offset;
size_t doorbell_aperture_size;
@@ -95,26 +95,35 @@ void kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation());
- BUG_ON(!kfd->doorbell_kernel_ptr);
+ if (!kfd->doorbell_kernel_ptr)
+ return -ENOMEM;
- pr_debug("kfd: doorbell initialization:\n");
- pr_debug("kfd: doorbell base == 0x%08lX\n",
+ pr_debug("Doorbell initialization:\n");
+ pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
+ pr_debug("doorbell_id_offset == 0x%08lX\n",
kfd->doorbell_id_offset);
- pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
+ pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit);
- pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
+ pr_debug("doorbell_kernel_offset == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
+ pr_debug("doorbell aperture size == 0x%08lX\n",
kfd->shared_resources.doorbell_aperture_size);
- pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+ pr_debug("doorbell kernel address == 0x%08lX\n",
(uintptr_t)kfd->doorbell_kernel_ptr);
+
+ return 0;
+}
+
+void kfd_doorbell_fini(struct kfd_dev *kfd)
+{
+ if (kfd->doorbell_kernel_ptr)
+ iounmap(kfd->doorbell_kernel_ptr);
}
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
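With kfd_doorbell_init() now returning an error code and kfd_doorbell_fini() added, callers are expected to pair the two. A hedged sketch of that pairing (function names hypothetical):

        static int example_device_init(struct kfd_dev *kfd)
        {
                int err = kfd_doorbell_init(kfd);

                if (err) {
                        pr_err("Error initializing doorbell aperture\n");
                        return err;
                }
                return 0;
        }

        static void example_device_exit(struct kfd_dev *kfd)
        {
                kfd_doorbell_fini(kfd); /* iounmaps doorbell_kernel_ptr */
        }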
@@ -131,7 +140,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
/* Find kfd device according to gpu id */
dev = kfd_device_by_id(vma->vm_pgoff);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
/* Calculate physical address of doorbell */
@@ -142,12 +151,11 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("kfd: mapping doorbell page in %s\n"
+ pr_debug("Mapping doorbell page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
- __func__,
(unsigned long long) vma->vm_start, address, vma->vm_flags,
doorbell_process_allocation());
@@ -166,8 +174,6 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
{
u32 inx;
- BUG_ON(!kfd || !doorbell_off);
-
mutex_lock(&kfd->doorbell_mutex);
inx = find_first_zero_bit(kfd->doorbell_available_index,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
@@ -185,7 +191,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
sizeof(u32)) + inx;
- pr_debug("kfd: get kernel queue doorbell\n"
+ pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
@@ -197,8 +203,6 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- BUG_ON(!kfd || !db_addr);
-
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
mutex_lock(&kfd->doorbell_mutex);
@@ -210,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
{
if (db) {
writel(value, db);
- pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+ pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
}
}
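Taken together, the helpers in this file follow a get/write/release lifecycle. A minimal usage sketch (kfd and new_wptr are hypothetical):

        u32 __iomem *db;
        unsigned int doorbell_off;

        db = kfd_get_kernel_doorbell(kfd, &doorbell_off);
        if (!db)
                return -ENOMEM;

        write_kernel_doorbell(db, new_wptr);    /* writel() under the hood */

        kfd_release_kernel_doorbell(kfd, db);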
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d1ce83d73a87..5979158c3f7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -110,7 +110,7 @@ static bool allocate_free_slot(struct kfd_process *process,
*out_page = page;
*out_slot_index = slot;
- pr_debug("allocated event signal slot in page %p, slot %d\n",
+ pr_debug("Allocated event signal slot in page %p, slot %d\n",
page, slot);
return true;
@@ -155,9 +155,9 @@ static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
struct signal_page,
event_pages)->page_index + 1;
- pr_debug("allocated new event signal page at %p, for process %p\n",
+ pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
- pr_debug("page index is %d\n", page->page_index);
+ pr_debug("Page index is %d\n", page->page_index);
list_add(&page->event_pages, &p->signal_event_pages);
@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page,
page->free_slots++;
/* We don't free signal pages, they are retained by the process
- * and reused until it exits. */
+ * and reused until it exits.
+ */
}
static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
@@ -246,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = p->next_nonsignal_event_id;
id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id) != NULL;
+ lookup_event_by_id(p, id);
id++)
;
@@ -265,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id) != NULL;
+ lookup_event_by_id(p, id);
id++)
;
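Both loops above implement a linear scan with a single wraparound over the nonsignal event ID range. The same allocator in isolation (a sketch; bounds passed in rather than taken from the KFD constants):

        /* Find the first free ID in [hint, last), then retry from first.
         * Returns 0 if the whole range is occupied.
         */
        static u32 scan_free_id(struct kfd_process *p, u32 first, u32 last,
                                u32 hint)
        {
                u32 id;

                for (id = hint; id < last && lookup_event_by_id(p, id); id++)
                        ;
                if (id < last)
                        return id;
                for (id = first; id < last && lookup_event_by_id(p, id); id++)
                        ;
                return id < last ? id : 0;
        }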
@@ -291,13 +292,13 @@ static int create_signal_event(struct file *devkfd,
struct kfd_event *ev)
{
if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
- pr_warn("amdkfd: Signal event wasn't created because limit was reached\n");
+ pr_warn("Signal event wasn't created because limit was reached\n");
return -ENOMEM;
}
if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
&ev->signal_slot_index)) {
- pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n");
+ pr_warn("Signal event wasn't created because out of kernel memory\n");
return -ENOMEM;
}
@@ -309,11 +310,7 @@ static int create_signal_event(struct file *devkfd,
ev->event_id = make_signal_event_id(ev->signal_page,
ev->signal_slot_index);
- pr_debug("signal event number %zu created with id %d, address %p\n",
- p->signal_event_count, ev->event_id,
- ev->user_signal_address);
-
- pr_debug("signal event number %zu created with id %d, address %p\n",
+ pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
@@ -345,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p)
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
- if (ev->signal_page != NULL) {
+ if (ev->signal_page) {
release_event_notification_slot(ev->signal_page,
ev->signal_slot_index);
p->signal_event_count--;
@@ -584,7 +581,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
* search faster.
*/
struct signal_page *page;
- unsigned i;
+ unsigned int i;
list_for_each_entry(page, &p->signal_event_pages, event_pages)
for (i = 0; i < SLOTS_PER_PAGE; i++)
@@ -816,7 +813,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
/* check required size is logical */
if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
get_order(vma->vm_end - vma->vm_start)) {
- pr_err("amdkfd: event page mmap requested illegal size\n");
+ pr_err("Event page mmap requested illegal size\n");
return -EINVAL;
}
@@ -825,7 +822,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
page = lookup_signal_page_by_index(p, page_index);
if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */
- pr_debug("signal page could not be found for page_index %u\n",
+ pr_debug("Signal page could not be found for page_index %u\n",
page_index);
return -EINVAL;
}
@@ -836,7 +833,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP;
- pr_debug("mapping signal page\n");
+ pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
pr_debug(" pfn == 0x%016lX\n", pfn);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 2b655103ba79..c59384bbbc5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process)
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_create_process_device_data(dev, process);
- if (pdd == NULL) {
+ if (!pdd) {
pr_err("Failed to create process device data\n");
return -1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 7f134aa9bfd3..70b3a99cffc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work)
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
- unsigned wanted = 0;
+ unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index d135cd002a95..681b639f5133 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -41,11 +41,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- BUG_ON(!kq || !dev);
- BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+ if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
+ return false;
- pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
- __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+ pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
+ queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -63,23 +63,23 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
KFD_MQD_TYPE_HIQ);
break;
default:
- BUG();
- break;
+ pr_err("Invalid queue type %d\n", type);
+ return false;
}
- if (kq->mqd == NULL)
+ if (!kq->mqd)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
- if (prop.doorbell_ptr == NULL) {
- pr_err("amdkfd: error init doorbell");
+ if (!prop.doorbell_ptr) {
+ pr_err("Failed to initialize doorbell");
goto err_get_kernel_doorbell;
}
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0) {
- pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
+ pr_err("Failed to init pq queues size %d\n", queue_size);
goto err_pq_allocate_vidmem;
}
@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr;
retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
- if (retval == false)
+ if (!retval)
goto err_eop_allocate_vidmem;
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
@@ -139,11 +139,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
/* assign HIQ to HQD */
if (type == KFD_QUEUE_TYPE_HIQ) {
- pr_debug("assigning hiq to hqd\n");
+ pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, NULL);
+ kq->queue->queue, &kq->queue->properties,
+ NULL);
} else {
/* allocate fence for DIQ */
@@ -180,8 +181,6 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
- BUG_ON(!kq);
-
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd,
NULL,
@@ -211,8 +210,6 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
uint32_t wptr, rptr;
unsigned int *queue_address;
- BUG_ON(!kq || !buffer_ptr);
-
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -252,11 +249,7 @@ static void submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
-#endif
-
- BUG_ON(!kq);
-#ifdef DEBUG
for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
if (i % 15 == 0)
@@ -272,7 +265,6 @@ static void submit_packet(struct kernel_queue *kq)
static void rollback_packet(struct kernel_queue *kq)
{
- BUG_ON(!kq);
kq->pending_wptr = *kq->queue->properties.write_ptr;
}
@@ -281,9 +273,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
{
struct kernel_queue *kq;
- BUG_ON(!dev);
-
- kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
+ kq = kzalloc(sizeof(*kq), GFP_KERNEL);
if (!kq)
return NULL;
@@ -304,7 +294,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
}
if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
- pr_err("amdkfd: failed to init kernel queue\n");
+ pr_err("Failed to init kernel queue\n");
kfree(kq);
return NULL;
}
@@ -313,32 +303,37 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
void kernel_queue_uninit(struct kernel_queue *kq)
{
- BUG_ON(!kq);
-
kq->ops.uninitialize(kq);
kfree(kq);
}
+/* FIXME: Can this test be removed? */
static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
{
struct kernel_queue *kq;
uint32_t *buffer, i;
int retval;
- BUG_ON(!dev);
-
- pr_err("amdkfd: starting kernel queue test\n");
+ pr_err("Starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
- BUG_ON(!kq);
+ if (unlikely(!kq)) {
+ pr_err(" Failed to initialize HIQ\n");
+ pr_err("Kernel queue test failed\n");
+ return;
+ }
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
- BUG_ON(retval != 0);
+ if (unlikely(retval != 0)) {
+ pr_err(" Failed to acquire packet buffer\n");
+ pr_err("Kernel queue test failed\n");
+ return;
+ }
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq);
- pr_err("amdkfd: ending kernel queue test\n");
+ pr_err("Ending kernel queue test\n");
}
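test_kq() above doubles as a usage reference for the kernel-queue ops: acquire a packet buffer, fill it, submit. The core sequence in brief (a sketch; kq is an initialized kernel queue):

        uint32_t *buffer;
        int i, retval;

        retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
        if (retval)
                return retval;
        for (i = 0; i < 5; i++)
                buffer[i] = kq->nop_packet;     /* five NOP packets */
        kq->ops.submit_packet(kq);              /* publishes the pending wptr */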
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 850a5623661f..0d73bea22c45 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm,
static int amdkfd_init_completed;
-int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
{
if (!amdkfd_init_completed)
return -EPROBE_DEFER;
@@ -90,7 +91,7 @@ static int __init kfd_module_init(void)
/* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
- pr_err("kfd: sched_policy has invalid value\n");
+ pr_err("sched_policy has invalid value\n");
return -1;
}
@@ -98,13 +99,13 @@ static int __init kfd_module_init(void)
if ((max_num_of_queues_per_device < 1) ||
(max_num_of_queues_per_device >
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
- pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+ pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
err = kfd_pasid_init();
if (err < 0)
- goto err_pasid;
+ return err;
err = kfd_chardev_init();
if (err < 0)
@@ -126,7 +127,6 @@ err_topology:
kfd_chardev_exit();
err_ioctl:
kfd_pasid_exit();
-err_pasid:
return err;
}
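The unwind above follows the usual goto-ladder convention: each failure path releases, in reverse order, exactly what the earlier steps set up. The ladder's shape in outline (a sketch of the surrounding function):

        err = kfd_pasid_init();
        if (err < 0)
                return err;             /* nothing to unwind yet */
        err = kfd_chardev_init();
        if (err < 0)
                goto err_ioctl;         /* undo pasid only */
        err = kfd_topology_init();
        if (err < 0)
                goto err_topology;      /* undo chardev, then pasid */
        return 0;

        err_topology:
                kfd_chardev_exit();
        err_ioctl:
                kfd_pasid_exit();
                return err;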
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 213a71e0b6c7..1f3a6ba7eed2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -67,7 +67,8 @@ struct mqd_manager {
int (*load_mqd)(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr);
+ struct queue_properties *p,
+ struct mm_struct *mms);
int (*update_mqd)(struct mqd_manager *mm, void *mqd,
struct queue_properties *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 6acc4313363e..44ffd23348fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -44,10 +44,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m;
int retval;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
@@ -101,7 +97,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = AQL_ENABLE;
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
@@ -115,8 +111,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
int retval;
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mm || !mqd || !mqd_mem_obj);
-
retval = kfd_gtt_sa_allocate(mm->dev,
sizeof(struct cik_sdma_rlc_registers),
mqd_mem_obj);
@@ -129,7 +123,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = (*mqd_mem_obj)->gpu_addr;
retval = mm->update_mqd(mm, m, q);
@@ -140,27 +134,31 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, struct queue_properties *p,
+ struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_load
- (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr,
+ wptr_shift, wptr_mask, mms);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr)
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
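To make the shift/mask arithmetic in load_mqd() concrete (ring size hypothetical):

        /* A 4 KiB ring holds 4096 / 4 = 1024 dwords, so
         *   wptr_mask  = 1024 - 1 = 0x3FF.
         * An AQL wptr counts 64-byte packets (16 dwords = 1 << 4), so
         *   wptr_shift = 4 rescales it to dwords; PM4/CP queues already
         *   count dwords and use wptr_shift = 0.
         */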
@@ -170,10 +168,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
{
struct cik_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
@@ -188,21 +182,17 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
- if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- }
- m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->cp_hqd_active = 1;
q->is_active = true;
}
@@ -214,8 +204,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
{
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mm || !mqd || !q);
-
m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
@@ -254,7 +242,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -301,10 +289,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m;
int retval;
- BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
-
- pr_debug("kfd: In func %s\n", __func__);
-
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
@@ -359,10 +343,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
{
struct cik_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE |
@@ -400,8 +380,6 @@ struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mqd);
-
m = (struct cik_sdma_rlc_registers *)mqd;
return m;
@@ -412,12 +390,10 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
{
struct mqd_manager *mqd;
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index a9b9882a9a77..73cbfe186dd2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = 1;
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
@@ -94,10 +94,15 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr)
+ struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_load
- (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr,
+ wptr_shift, wptr_mask, mms);
}
static int __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -106,10 +111,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
{
struct vi_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
@@ -117,7 +118,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
@@ -126,10 +127,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control =
- 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
- pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
@@ -139,8 +139,15 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
- m->cp_hqd_eop_control |=
- ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+ /*
+ * HW does not clamp this field correctly. Maximum EOP queue size
+ * is constrained by per-SE EOP done signal count, which is 8-bit.
+ * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
+ * more than (EOP entry count - 1) so a queue size of 0x800 dwords
+ * is safe, giving a maximum field value of 0xA.
+ */
+ m->cp_hqd_eop_control |= min(0xA,
+ ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
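Concretely, for the clamp introduced above (buffer size hypothetical):

        /* An 8 KiB EOP buffer is 0x800 dwords:
         *   ffs(0x800) - 1 - 1 = 12 - 2 = 0xA,
         * the largest safe encoding, so any larger buffer is clamped
         * to 0xA by the min() above.
         */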
@@ -156,12 +163,10 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->cp_hqd_active = 1;
q->is_active = true;
}
@@ -181,14 +186,13 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, type, timeout,
+ (mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id);
}
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
@@ -238,12 +242,10 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
{
struct mqd_manager *mqd;
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 7131998848d7..1d312603de9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -26,7 +26,6 @@
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
-#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
@@ -35,7 +34,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
{
unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
- BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
+ WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
+ "Runlist IB overflow");
*wptr = temp;
}
@@ -43,12 +43,12 @@ static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
- header.u32all = 0;
+ header.u32All = 0;
header.opcode = opcode;
header.count = packet_size/sizeof(uint32_t) - 2;
header.type = PM4_TYPE_3;
- return header.u32all;
+ return header.u32All;
}
static void pm_calc_rlib_size(struct packet_manager *pm,
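The count field encodes the packet body length in dwords, minus one. A worked example (packet size hypothetical):

        /* A 6-dword type-3 packet (24 bytes): one header dword plus a
         * 5-dword body, so
         *   header.count = 24 / sizeof(uint32_t) - 2 = 4.
         */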
@@ -58,8 +58,6 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int process_count, queue_count;
unsigned int map_queue_size;
- BUG_ON(!pm || !rlib_size || !over_subscription);
-
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count;
@@ -67,15 +65,12 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription = false;
if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
*over_subscription = true;
- pr_debug("kfd: over subscribed runlist\n");
+ pr_debug("Over subscribed runlist\n");
}
- map_queue_size =
- (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
- sizeof(struct pm4_mes_map_queues) :
- sizeof(struct pm4_map_queues);
+ map_queue_size = sizeof(struct pm4_mes_map_queues);
/* calculate run list ib allocation size */
- *rlib_size = process_count * sizeof(struct pm4_map_process) +
+ *rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
queue_count * map_queue_size;
/*
@@ -83,9 +78,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
* when over subscription
*/
if (*over_subscription)
- *rlib_size += sizeof(struct pm4_runlist);
+ *rlib_size += sizeof(struct pm4_mes_runlist);
- pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+ pr_debug("runlist ib size %d\n", *rlib_size);
}
static int pm_allocate_runlist_ib(struct packet_manager *pm,
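A worked sizing example for pm_calc_rlib_size() above (counts hypothetical):

        /* Two processes with five active queues in total:
         *   *rlib_size = 2 * sizeof(struct pm4_mes_map_process)
         *              + 5 * sizeof(struct pm4_mes_map_queues);
         * plus one struct pm4_mes_runlist when over-subscribed, so the
         * runlist can chain back into itself.
         */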
@@ -96,17 +91,16 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
{
int retval;
- BUG_ON(!pm);
- BUG_ON(pm->allocated);
- BUG_ON(is_over_subscription == NULL);
+ if (WARN_ON(pm->allocated))
+ return -EINVAL;
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
&pm->ib_buffer_obj);
- if (retval != 0) {
- pr_err("kfd: failed to allocate runlist IB\n");
+ if (retval) {
+ pr_err("Failed to allocate runlist IB\n");
return retval;
}
@@ -121,15 +115,16 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
- struct pm4_runlist *packet;
+ struct pm4_mes_runlist *packet;
- BUG_ON(!pm || !buffer || !ib);
+ if (WARN_ON(!ib))
+ return -EFAULT;
- packet = (struct pm4_runlist *)buffer;
+ packet = (struct pm4_mes_runlist *)buffer;
- memset(buffer, 0, sizeof(struct pm4_runlist));
- packet->header.u32all = build_pm4_header(IT_RUN_LIST,
- sizeof(struct pm4_runlist));
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+ packet->header.u32All = build_pm4_header(IT_RUN_LIST,
+ sizeof(struct pm4_mes_runlist));
packet->bitfields4.ib_size = ib_size_in_dwords;
packet->bitfields4.chain = chain ? 1 : 0;
@@ -144,20 +139,16 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
- struct pm4_map_process *packet;
+ struct pm4_mes_map_process *packet;
struct queue *cur;
uint32_t num_queues;
- BUG_ON(!pm || !buffer || !qpd);
-
- packet = (struct pm4_map_process *)buffer;
-
- pr_debug("kfd: In func %s\n", __func__);
+ packet = (struct pm4_mes_map_process *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_process));
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
- packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
- sizeof(struct pm4_map_process));
+ packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
+ sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 1;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -175,27 +166,26 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+ /* TODO: scratch support */
+ packet->sh_hidden_private_base_vmid = 0;
+
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
return 0;
}
-static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
struct pm4_mes_map_queues *packet;
bool use_static = is_static;
- BUG_ON(!pm || !buffer || !q);
-
- pr_debug("kfd: In func %s\n", __func__);
-
packet = (struct pm4_mes_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_queues));
+ memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
- packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_map_queues));
+ packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_mes_map_queues));
packet->bitfields2.alloc_format =
alloc_format__mes_map_queues__one_per_pipe_vi;
packet->bitfields2.num_queues = 1;
@@ -223,10 +213,8 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
use_static = false; /* no static queues under SDMA */
break;
default:
- pr_err("kfd: in %s queue type %d\n", __func__,
- q->properties.type);
- BUG();
- break;
+ WARN(1, "queue type %d", q->properties.type);
+ return -EINVAL;
}
packet->bitfields3.doorbell_offset =
q->properties.doorbell_off;
@@ -246,68 +234,6 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q, bool is_static)
-{
- struct pm4_map_queues *packet;
- bool use_static = is_static;
-
- BUG_ON(!pm || !buffer || !q);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- packet = (struct pm4_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_queues));
-
- packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_map_queues));
- packet->bitfields2.alloc_format =
- alloc_format__mes_map_queues__one_per_pipe;
- packet->bitfields2.num_queues = 1;
- packet->bitfields2.queue_sel =
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
-
- packet->bitfields2.vidmem = (q->properties.is_interop) ?
- vidmem__mes_map_queues__uses_video_memory :
- vidmem__mes_map_queues__uses_no_video_memory;
-
- switch (q->properties.type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__compute;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__sdma0;
- use_static = false; /* no static queues under SDMA */
- break;
- default:
- BUG();
- break;
- }
-
- packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
- q->properties.doorbell_off;
-
- packet->mes_map_queues_ordinals[0].bitfields3.is_static =
- (use_static) ? 1 : 0;
-
- packet->mes_map_queues_ordinals[0].mqd_addr_lo =
- lower_32_bits(q->gart_mqd_addr);
-
- packet->mes_map_queues_ordinals[0].mqd_addr_hi =
- upper_32_bits(q->gart_mqd_addr);
-
- packet->mes_map_queues_ordinals[0].wptr_addr_lo =
- lower_32_bits((uint64_t)q->properties.write_ptr);
-
- packet->mes_map_queues_ordinals[0].wptr_addr_hi =
- upper_32_bits((uint64_t)q->properties.write_ptr);
-
- return 0;
-}
-
static int pm_create_runlist_ib(struct packet_manager *pm,
struct list_head *queues,
uint64_t *rl_gpu_addr,
@@ -322,19 +248,16 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct kernel_queue *kq;
bool is_over_subscription;
- BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
-
rl_wptr = retval = proccesses_mapped = 0;
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
&alloc_size_bytes, &is_over_subscription);
- if (retval != 0)
+ if (retval)
return retval;
*rl_size_bytes = alloc_size_bytes;
- pr_debug("kfd: In func %s\n", __func__);
- pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+ pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->queue_count);
/* build the run list ib packet */
@@ -342,42 +265,35 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
qpd = cur->qpd;
/* build map process packet */
if (proccesses_mapped >= pm->dqm->processes_count) {
- pr_debug("kfd: not enough space left in runlist IB\n");
+ pr_debug("Not enough space left in runlist IB\n");
pm_release_ib(pm);
return -ENOMEM;
}
retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
- if (retval != 0)
+ if (retval)
return retval;
proccesses_mapped++;
- inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
+ inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
alloc_size_bytes);
list_for_each_entry(kq, &qpd->priv_queue_list, list) {
if (!kq->queue->properties.is_active)
continue;
- pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
+ pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
kq->queue->queue, qpd->is_debug);
- if (pm->dqm->dev->device_info->asic_family ==
- CHIP_CARRIZO)
- retval = pm_create_map_queue_vi(pm,
- &rl_buffer[rl_wptr],
- kq->queue,
- qpd->is_debug);
- else
- retval = pm_create_map_queue(pm,
+ retval = pm_create_map_queue(pm,
&rl_buffer[rl_wptr],
kq->queue,
qpd->is_debug);
- if (retval != 0)
+ if (retval)
return retval;
inc_wptr(&rl_wptr,
- sizeof(struct pm4_map_queues),
+ sizeof(struct pm4_mes_map_queues),
alloc_size_bytes);
}
@@ -385,51 +301,44 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
if (!q->properties.is_active)
continue;
- pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
+ pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
q->queue, qpd->is_debug);
- if (pm->dqm->dev->device_info->asic_family ==
- CHIP_CARRIZO)
- retval = pm_create_map_queue_vi(pm,
- &rl_buffer[rl_wptr],
- q,
- qpd->is_debug);
- else
- retval = pm_create_map_queue(pm,
+ retval = pm_create_map_queue(pm,
&rl_buffer[rl_wptr],
q,
qpd->is_debug);
- if (retval != 0)
+ if (retval)
return retval;
inc_wptr(&rl_wptr,
- sizeof(struct pm4_map_queues),
+ sizeof(struct pm4_mes_map_queues),
alloc_size_bytes);
}
}
- pr_debug("kfd: finished map process and queues to runlist\n");
+ pr_debug("Finished map process and queues to runlist\n");
if (is_over_subscription)
- pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
- alloc_size_bytes / sizeof(uint32_t), true);
+ retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
+ *rl_gpu_addr,
+ alloc_size_bytes / sizeof(uint32_t),
+ true);
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
pr_debug("\n");
- return 0;
+ return retval;
}
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
-
pm->dqm = dqm;
mutex_init(&pm->lock);
pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
- if (pm->priv_queue == NULL) {
+ if (!pm->priv_queue) {
mutex_destroy(&pm->lock);
return -ENOMEM;
}
@@ -440,8 +349,6 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
void pm_uninit(struct packet_manager *pm)
{
- BUG_ON(!pm);
-
mutex_destroy(&pm->lock);
kernel_queue_uninit(pm->priv_queue);
}
@@ -449,25 +356,22 @@ void pm_uninit(struct packet_manager *pm)
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res)
{
- struct pm4_set_resources *packet;
-
- BUG_ON(!pm || !res);
-
- pr_debug("kfd: In func %s\n", __func__);
+ struct pm4_mes_set_resources *packet;
+ int retval = 0;
mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
sizeof(*packet) / sizeof(uint32_t),
- (unsigned int **)&packet);
- if (packet == NULL) {
- mutex_unlock(&pm->lock);
- pr_err("kfd: failed to allocate buffer on kernel queue\n");
- return -ENOMEM;
+ (unsigned int **)&packet);
+ if (!packet) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ retval = -ENOMEM;
+ goto out;
}
- memset(packet, 0, sizeof(struct pm4_set_resources));
- packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
- sizeof(struct pm4_set_resources));
+ memset(packet, 0, sizeof(struct pm4_mes_set_resources));
+ packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_mes_set_resources));
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
@@ -485,9 +389,10 @@ int pm_send_set_resources(struct packet_manager *pm,
pm->priv_queue->ops.submit_packet(pm->priv_queue);
+out:
mutex_unlock(&pm->lock);
- return 0;
+ return retval;
}
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
@@ -497,26 +402,24 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
size_t rl_ib_size, packet_size_dwords;
int retval;
- BUG_ON(!pm || !dqm_queues);
-
retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
&rl_ib_size);
- if (retval != 0)
+ if (retval)
goto fail_create_runlist_ib;
- pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
- packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
+ packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
- if (retval != 0)
+ if (retval)
goto fail_acquire_packet_buffer;
retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
rl_ib_size / sizeof(uint32_t), false);
- if (retval != 0)
+ if (retval)
goto fail_create_runlist;
pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -530,8 +433,7 @@ fail_create_runlist:
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
- if (pm->allocated)
- pm_release_ib(pm);
+ pm_release_ib(pm);
return retval;
}
@@ -539,20 +441,21 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value)
{
int retval;
- struct pm4_query_status *packet;
+ struct pm4_mes_query_status *packet;
- BUG_ON(!pm || !fence_address);
+ if (WARN_ON(!fence_address))
+ return -EFAULT;
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
- sizeof(struct pm4_query_status) / sizeof(uint32_t),
+ sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
(unsigned int **)&packet);
- if (retval != 0)
+ if (retval)
goto fail_acquire_packet_buffer;
- packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
- sizeof(struct pm4_query_status));
+ packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_mes_query_status));
packet->bitfields2.context_id = 0;
packet->bitfields2.interrupt_sel =
@@ -566,9 +469,6 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
packet->data_lo = lower_32_bits((uint64_t)fence_value);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
- mutex_unlock(&pm->lock);
-
- return 0;
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
@@ -582,24 +482,22 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
{
int retval;
uint32_t *buffer;
- struct pm4_unmap_queues *packet;
-
- BUG_ON(!pm);
+ struct pm4_mes_unmap_queues *packet;
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
- sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
+ sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
&buffer);
- if (retval != 0)
+ if (retval)
goto err_acquire_packet_buffer;
- packet = (struct pm4_unmap_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_unmap_queues));
- pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
+ packet = (struct pm4_mes_unmap_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+ pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
mode, reset, type);
- packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
- sizeof(struct pm4_unmap_queues));
+ packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
+ sizeof(struct pm4_mes_unmap_queues));
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE:
case KFD_QUEUE_TYPE_DIQ:
@@ -611,8 +509,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break;
default:
- BUG();
- break;
+ WARN(1, "queue type %d", type);
+ retval = -EINVAL;
+ goto err_invalid;
}
if (reset)
@@ -636,16 +535,17 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
break;
case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
+ queue_sel__mes_unmap_queues__unmap_all_queues;
break;
case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
+ queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
- BUG();
- break;
+ WARN(1, "filter %d", mode);
+ retval = -EINVAL;
+ goto err_invalid;
}
pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -653,6 +553,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
mutex_unlock(&pm->lock);
return 0;
+err_invalid:
+ pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
mutex_unlock(&pm->lock);
return retval;
@@ -660,8 +562,6 @@ err_acquire_packet_buffer:
void pm_release_ib(struct packet_manager *pm)
{
- BUG_ON(!pm);
-
mutex_lock(&pm->lock);
if (pm->allocated) {
kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 6cfe7f1f18cf..1e06de0bc673 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,7 +32,8 @@ int kfd_pasid_init(void)
{
pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
- pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
+ GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
@@ -91,6 +92,6 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid)
{
- BUG_ON(pasid == 0 || pasid >= pasid_limit);
- clear_bit(pasid, pasid_bitmap);
+ if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
+ clear_bit(pasid, pasid_bitmap);
}
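kfd_pasid_alloc(), shown only partially here, is the bitmap counterpart of this free path. A hedged sketch of such an allocator (callers must serialize access; the driver uses its own locking):

        static unsigned int example_pasid_alloc(void)
        {
                /* PASID 0 is reserved, so the search starts at 1 */
                unsigned int found = find_next_zero_bit(pasid_bitmap,
                                                        pasid_limit, 1);

                if (found == pasid_limit)
                        return 0;       /* 0 signals allocation failure */
                set_bit(found, pasid_bitmap);
                return found;
        }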
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
index 5b393f3e34a9..e50f73d25de6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -28,112 +28,19 @@
#define PM4_MES_HEADER_DEFINED
union PM4_MES_TYPE_3_HEADER {
struct {
- uint32_t reserved1:8; /* < reserved */
- uint32_t opcode:8; /* < IT opcode */
- uint32_t count:14; /* < number of DWORDs - 1
- * in the information body.
- */
- uint32_t type:2; /* < packet identifier.
- * It should be 3 for type 3 packets
- */
+ /* reserved */
+ uint32_t reserved1:8;
+ /* IT opcode */
+ uint32_t opcode:8;
+ /* number of DWORDs - 1 in the information body */
+ uint32_t count:14;
+ /* packet identifier. It should be 3 for type 3 packets */
+ uint32_t type:2;
};
uint32_t u32all;
};
#endif /* PM4_MES_HEADER_DEFINED */
-/* --------------------MES_SET_RESOURCES-------------------- */
-
-#ifndef PM4_MES_SET_RESOURCES_DEFINED
-#define PM4_MES_SET_RESOURCES_DEFINED
-enum set_resources_queue_type_enum {
- queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
- queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
- queue_type__mes_set_resources__hsa_debug_interface_queue = 4
-};
-
-struct pm4_set_resources {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t vmid_mask:16;
- uint32_t unmap_latency:8;
- uint32_t reserved1:5;
- enum set_resources_queue_type_enum queue_type:3;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- uint32_t queue_mask_lo;
- uint32_t queue_mask_hi;
- uint32_t gws_mask_lo;
- uint32_t gws_mask_hi;
-
- union {
- struct {
- uint32_t oac_mask:16;
- uint32_t reserved2:16;
- } bitfields7;
- uint32_t ordinal7;
- };
-
- union {
- struct {
- uint32_t gds_heap_base:6;
- uint32_t reserved3:5;
- uint32_t gds_heap_size:6;
- uint32_t reserved4:15;
- } bitfields8;
- uint32_t ordinal8;
- };
-
-};
-#endif
-
-/*--------------------MES_RUN_LIST-------------------- */
-
-#ifndef PM4_MES_RUN_LIST_DEFINED
-#define PM4_MES_RUN_LIST_DEFINED
-
-struct pm4_runlist {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t reserved1:2;
- uint32_t ib_base_lo:30;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- union {
- struct {
- uint32_t ib_base_hi:16;
- uint32_t reserved2:16;
- } bitfields3;
- uint32_t ordinal3;
- };
-
- union {
- struct {
- uint32_t ib_size:20;
- uint32_t chain:1;
- uint32_t offload_polling:1;
- uint32_t reserved3:1;
- uint32_t valid:1;
- uint32_t reserved4:8;
- } bitfields4;
- uint32_t ordinal4;
- };
-
-};
-#endif
/*--------------------MES_MAP_PROCESS-------------------- */
@@ -186,217 +93,58 @@ struct pm4_map_process {
};
#endif
-/*--------------------MES_MAP_QUEUES--------------------*/
-
-#ifndef PM4_MES_MAP_QUEUES_DEFINED
-#define PM4_MES_MAP_QUEUES_DEFINED
-enum map_queues_queue_sel_enum {
- queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
- queue_sel__mes_map_queues__enable_process_queues = 2
-};
+#ifndef PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
+#define PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
-enum map_queues_vidmem_enum {
- vidmem__mes_map_queues__uses_no_video_memory = 0,
- vidmem__mes_map_queues__uses_video_memory = 1
-};
-
-enum map_queues_alloc_format_enum {
- alloc_format__mes_map_queues__one_per_pipe = 0,
- alloc_format__mes_map_queues__all_on_one_pipe = 1
-};
-
-enum map_queues_engine_sel_enum {
- engine_sel__mes_map_queues__compute = 0,
- engine_sel__mes_map_queues__sdma0 = 2,
- engine_sel__mes_map_queues__sdma1 = 3
-};
-
-struct pm4_map_queues {
+struct pm4_map_process_scratch_kv {
union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t reserved1:4;
- enum map_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:2;
- uint32_t vmid:4;
- uint32_t reserved3:4;
- enum map_queues_vidmem_enum vidmem:2;
- uint32_t reserved4:6;
- enum map_queues_alloc_format_enum alloc_format:2;
- enum map_queues_engine_sel_enum engine_sel:3;
- uint32_t num_queues:3;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- struct {
- union {
- struct {
- uint32_t is_static:1;
- uint32_t reserved5:1;
- uint32_t doorbell_offset:21;
- uint32_t reserved6:3;
- uint32_t queue:6;
- } bitfields3;
- uint32_t ordinal3;
- };
-
- uint32_t mqd_addr_lo;
- uint32_t mqd_addr_hi;
- uint32_t wptr_addr_lo;
- uint32_t wptr_addr_hi;
-
- } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
-
-};
-#endif
-
-/*--------------------MES_QUERY_STATUS--------------------*/
-
-#ifndef PM4_MES_QUERY_STATUS_DEFINED
-#define PM4_MES_QUERY_STATUS_DEFINED
-enum query_status_interrupt_sel_enum {
- interrupt_sel__mes_query_status__completion_status = 0,
- interrupt_sel__mes_query_status__process_status = 1,
- interrupt_sel__mes_query_status__queue_status = 2
-};
-
-enum query_status_command_enum {
- command__mes_query_status__interrupt_only = 0,
- command__mes_query_status__fence_only_immediate = 1,
- command__mes_query_status__fence_only_after_write_ack = 2,
- command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
-};
-
-enum query_status_engine_sel_enum {
- engine_sel__mes_query_status__compute = 0,
- engine_sel__mes_query_status__sdma0_queue = 2,
- engine_sel__mes_query_status__sdma1_queue = 3
-};
-
-struct pm4_query_status {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t context_id:28;
- enum query_status_interrupt_sel_enum interrupt_sel:2;
- enum query_status_command_enum command:2;
- } bitfields2;
- uint32_t ordinal2;
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
};
union {
struct {
uint32_t pasid:16;
- uint32_t reserved1:16;
- } bitfields3a;
- struct {
- uint32_t reserved2:2;
- uint32_t doorbell_offset:21;
- uint32_t reserved3:3;
- enum query_status_engine_sel_enum engine_sel:3;
- uint32_t reserved4:3;
- } bitfields3b;
- uint32_t ordinal3;
- };
-
- uint32_t addr_lo;
- uint32_t addr_hi;
- uint32_t data_lo;
- uint32_t data_hi;
-};
-#endif
-
-/*--------------------MES_UNMAP_QUEUES--------------------*/
-
-#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
-#define PM4_MES_UNMAP_QUEUES_DEFINED
-enum unmap_queues_action_enum {
- action__mes_unmap_queues__preempt_queues = 0,
- action__mes_unmap_queues__reset_queues = 1,
- action__mes_unmap_queues__disable_process_queues = 2
-};
-
-enum unmap_queues_queue_sel_enum {
- queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
- queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2,
- queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only = 3
-};
-
-enum unmap_queues_engine_sel_enum {
- engine_sel__mes_unmap_queues__compute = 0,
- engine_sel__mes_unmap_queues__sdma0 = 2,
- engine_sel__mes_unmap_queues__sdma1 = 3
-};
-
-struct pm4_unmap_queues {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- enum unmap_queues_action_enum action:2;
- uint32_t reserved1:2;
- enum unmap_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:20;
- enum unmap_queues_engine_sel_enum engine_sel:3;
- uint32_t num_queues:3;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
};
union {
struct {
- uint32_t pasid:16;
- uint32_t reserved3:16;
- } bitfields3a;
- struct {
- uint32_t reserved4:2;
- uint32_t doorbell_offset0:21;
- uint32_t reserved5:9;
- } bitfields3b;
+ uint32_t page_table_base:28;
+ uint32_t reserved2:4;
+ } bitfields3;
uint32_t ordinal3;
};
- union {
- struct {
- uint32_t reserved6:2;
- uint32_t doorbell_offset1:21;
- uint32_t reserved7:9;
- } bitfields4;
- uint32_t ordinal4;
- };
-
- union {
- struct {
- uint32_t reserved8:2;
- uint32_t doorbell_offset2:21;
- uint32_t reserved9:9;
- } bitfields5;
- uint32_t ordinal5;
- };
+ uint32_t reserved3;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_hidden_private_base_vmid;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
union {
struct {
- uint32_t reserved10:2;
- uint32_t doorbell_offset3:21;
- uint32_t reserved11:9;
- } bitfields6;
- uint32_t ordinal6;
+ uint32_t num_gws:6;
+ uint32_t reserved6:2;
+ uint32_t num_oac:4;
+ uint32_t reserved7:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields14;
+ uint32_t ordinal14;
};
+ uint32_t completion_signal_lo32;
+ uint32_t completion_signal_hi32;
};
#endif
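
The struct above follows the standard PM4 type-3 layout: every ordinal is a union of named bitfields and a raw uint32_t, and the header's count field holds the number of DWORDs minus one in the information body (so packet size in DWORDs minus two). A minimal sketch of filling such a packet, assuming an IT_MAP_PROCESS opcode macro; only the struct layout comes from the header above:

static void build_map_process_scratch_kv(struct pm4_map_process_scratch_kv *pkt,
					 uint16_t pasid,
					 uint64_t page_table_base)
{
	memset(pkt, 0, sizeof(*pkt));

	pkt->header.u32All = 0;
	pkt->header.opcode = IT_MAP_PROCESS;	/* assumed opcode macro */
	/* count = (body DWORDs) - 1 = (total DWORDs) - 2 */
	pkt->header.count = sizeof(*pkt) / sizeof(uint32_t) - 2;
	pkt->header.type = 3;			/* PM4 type-3 packet */

	pkt->bitfields2.pasid = pasid;
	pkt->bitfields2.diq_enable = 0;
	pkt->bitfields2.process_quantum = 1;

	/* bits [39:12] of the page table's physical address */
	pkt->bitfields3.page_table_base = page_table_base >> 12;
}
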
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
index 08c721922812..7c8d9b357749 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER {
struct {
uint32_t reserved1 : 8; /* < reserved */
uint32_t opcode : 8; /* < IT opcode */
- uint32_t count : 14;/* < number of DWORDs - 1 in the
- information body. */
- uint32_t type : 2; /* < packet identifier.
- It should be 3 for type 3 packets */
+ uint32_t count : 14;/* < Number of DWORDS - 1 in the
+ * information body
+ */
+ uint32_t type : 2; /* < packet identifier
+ * It should be 3 for type 3 packets
+ */
};
uint32_t u32All;
};
@@ -124,9 +126,10 @@ struct pm4_mes_runlist {
uint32_t ib_size:20;
uint32_t chain:1;
uint32_t offload_polling:1;
- uint32_t reserved3:1;
+ uint32_t reserved2:1;
uint32_t valid:1;
- uint32_t reserved4:8;
+ uint32_t process_cnt:4;
+ uint32_t reserved3:4;
} bitfields4;
uint32_t ordinal4;
};
@@ -141,8 +144,8 @@ struct pm4_mes_runlist {
struct pm4_mes_map_process {
union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
};
union {
@@ -153,36 +156,48 @@ struct pm4_mes_map_process {
uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
-};
+ };
union {
struct {
uint32_t page_table_base:28;
- uint32_t reserved2:4;
+ uint32_t reserved3:4;
} bitfields3;
uint32_t ordinal3;
};
+ uint32_t reserved;
+
uint32_t sh_mem_bases;
+ uint32_t sh_mem_config;
uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit;
- uint32_t sh_mem_config;
+
+ uint32_t sh_hidden_private_base_vmid;
+
+ uint32_t reserved2;
+ uint32_t reserved3;
+
uint32_t gds_addr_lo;
uint32_t gds_addr_hi;
union {
struct {
uint32_t num_gws:6;
- uint32_t reserved3:2;
+ uint32_t reserved4:2;
uint32_t num_oac:4;
- uint32_t reserved4:4;
+ uint32_t reserved5:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields10;
uint32_t ordinal10;
};
+ uint32_t completion_signal_lo;
+ uint32_t completion_signal_hi;
+
};
+
#endif
/*--------------------MES_MAP_QUEUES--------------------*/
@@ -335,7 +350,7 @@ enum mes_unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__sdmal = 3
};
-struct PM4_MES_UNMAP_QUEUES {
+struct pm4_mes_unmap_queues {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
@@ -395,4 +410,101 @@ struct PM4_MES_UNMAP_QUEUES {
};
#endif
+#ifndef PM4_MEC_RELEASE_MEM_DEFINED
+#define PM4_MEC_RELEASE_MEM_DEFINED
+enum RELEASE_MEM_event_index_enum {
+ event_index___release_mem__end_of_pipe = 5,
+ event_index___release_mem__shader_done = 6
+};
+
+enum RELEASE_MEM_cache_policy_enum {
+ cache_policy___release_mem__lru = 0,
+ cache_policy___release_mem__stream = 1,
+ cache_policy___release_mem__bypass = 2
+};
+
+enum RELEASE_MEM_dst_sel_enum {
+ dst_sel___release_mem__memory_controller = 0,
+ dst_sel___release_mem__tc_l2 = 1,
+ dst_sel___release_mem__queue_write_pointer_register = 2,
+ dst_sel___release_mem__queue_write_pointer_poll_mask_bit = 3
+};
+
+enum RELEASE_MEM_int_sel_enum {
+ int_sel___release_mem__none = 0,
+ int_sel___release_mem__send_interrupt_only = 1,
+ int_sel___release_mem__send_interrupt_after_write_confirm = 2,
+ int_sel___release_mem__send_data_after_write_confirm = 3
+};
+
+enum RELEASE_MEM_data_sel_enum {
+ data_sel___release_mem__none = 0,
+ data_sel___release_mem__send_32_bit_low = 1,
+ data_sel___release_mem__send_64_bit_data = 2,
+ data_sel___release_mem__send_gpu_clock_counter = 3,
+ data_sel___release_mem__send_cp_perfcounter_hi_lo = 4,
+ data_sel___release_mem__store_gds_data_to_memory = 5
+};
+
+struct pm4_mec_release_mem {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int event_type:6;
+ unsigned int reserved1:2;
+ enum RELEASE_MEM_event_index_enum event_index:4;
+ unsigned int tcl1_vol_action_ena:1;
+ unsigned int tc_vol_action_ena:1;
+ unsigned int reserved2:1;
+ unsigned int tc_wb_action_ena:1;
+ unsigned int tcl1_action_ena:1;
+ unsigned int tc_action_ena:1;
+ unsigned int reserved3:6;
+ unsigned int atc:1;
+ enum RELEASE_MEM_cache_policy_enum cache_policy:2;
+ unsigned int reserved4:5;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int reserved5:16;
+ enum RELEASE_MEM_dst_sel_enum dst_sel:2;
+ unsigned int reserved6:6;
+ enum RELEASE_MEM_int_sel_enum int_sel:3;
+ unsigned int reserved7:2;
+ enum RELEASE_MEM_data_sel_enum data_sel:3;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ union {
+ struct {
+ unsigned int reserved8:2;
+ unsigned int address_lo_32b:30;
+ } bitfields4;
+ struct {
+ unsigned int reserved9:3;
+ unsigned int address_lo_64b:29;
+ } bitfields5;
+ unsigned int ordinal4;
+ };
+
+ unsigned int address_hi;
+
+ unsigned int data_lo;
+
+ unsigned int data_hi;
+};
+#endif
+
+enum {
+ CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+
#endif
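
The new pm4_mec_release_mem packet is how the compute microengine signals completion: at end-of-pipe it can flush caches, write a fence value to memory, and raise an interrupt once the write is confirmed. A hedged sketch of wiring the fields together for a 64-bit fence (IT_RELEASE_MEM is an assumed opcode macro; the enums and layout come from the block above):

static void build_release_mem(struct pm4_mec_release_mem *pkt,
			      uint64_t fence_addr, uint64_t fence_value)
{
	memset(pkt, 0, sizeof(*pkt));

	pkt->header.u32All = 0;
	pkt->header.opcode = IT_RELEASE_MEM;	/* assumed macro */
	pkt->header.count = sizeof(*pkt) / sizeof(unsigned int) - 2;
	pkt->header.type = 3;

	pkt->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
	pkt->bitfields2.event_index = event_index___release_mem__end_of_pipe;
	pkt->bitfields2.tcl1_action_ena = 1;
	pkt->bitfields2.tc_action_ena = 1;
	pkt->bitfields2.cache_policy = cache_policy___release_mem__lru;

	pkt->bitfields3.data_sel = data_sel___release_mem__send_64_bit_data;
	pkt->bitfields3.int_sel =
		int_sel___release_mem__send_interrupt_after_write_confirm;

	/* a 64-bit destination must be 8-byte aligned: bits [31:3] */
	pkt->bitfields5.address_lo_64b = lower_32_bits(fence_addr) >> 3;
	pkt->address_hi = upper_32_bits(fence_addr);

	pkt->data_lo = lower_32_bits(fence_value);
	pkt->data_hi = upper_32_bits(fence_value);
}
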
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4750cabe4252..b397ec726400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -239,11 +239,6 @@ enum kfd_preempt_type_filter {
KFD_PREEMPT_TYPE_FILTER_BY_PASID
};
-enum kfd_preempt_type {
- KFD_PREEMPT_TYPE_WAVEFRONT,
- KFD_PREEMPT_TYPE_WAVEFRONT_RESET
-};
-
/**
* enum kfd_queue_type
*
@@ -294,13 +289,13 @@ enum kfd_queue_format {
* @write_ptr: Defines the number of dwords written to the ring buffer.
*
* @doorbell_ptr: This field aim is to notify the H/W of new packet written to
- * the queue ring buffer. This field should be similar to write_ptr and the user
- * should update this field after he updated the write_ptr.
+ * the queue ring buffer. This field should be similar to write_ptr and the
+ * user should update this field after updating the write_ptr.
*
* @doorbell_off: The doorbell offset in the doorbell pci-bar.
*
- * @is_interop: Defines if this is a interop queue. Interop queue means that the
- * queue can access both graphics and compute resources.
+ * @is_interop: Defines if this is an interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
*
* @is_active: Defines if the queue is active or not.
*
@@ -352,9 +347,10 @@ struct queue_properties {
* @properties: The queue properties.
*
* @mec: Used only in no cp scheduling mode and identifies the micro engine id
- * that the queue should be execute on.
+ * that the queue should be executed on.
*
- * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
+ * id.
*
* @queue: Used only in no cp scheduling mode and identifies the queue's slot.
*
@@ -436,6 +432,7 @@ struct qcm_process_device {
uint32_t gds_size;
uint32_t num_gws;
uint32_t num_oac;
+ uint32_t sh_hidden_private_base;
};
/* Data that is per-process-per device. */
@@ -520,8 +517,8 @@ struct kfd_process {
struct mutex event_mutex;
/* All events in process hashed by ID, linked on kfd_event.events. */
DECLARE_HASHTABLE(events, 4);
- struct list_head signal_event_pages; /* struct slot_page_header.
- event_pages */
+ /* struct slot_page_header.event_pages */
+ struct list_head signal_event_pages;
u32 next_nonsignal_event_id;
size_t signal_event_count;
};
@@ -559,8 +556,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);
@@ -573,7 +572,8 @@ unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
/* Doorbells */
-void kfd_doorbell_init(struct kfd_dev *kfd);
+int kfd_doorbell_init(struct kfd_dev *kfd);
+void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off);
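
Changing kfd_doorbell_init() to return int (with a matching kfd_doorbell_fini()) lets device bring-up propagate a failed doorbell-BAR mapping instead of continuing with a broken aperture. A sketch of the resulting caller pattern, where example_later_init() stands in for whatever init step follows (an assumption, not from the patch):

static int example_device_init(struct kfd_dev *kfd)
{
	int err;

	err = kfd_doorbell_init(kfd);
	if (err)
		return err;		/* doorbell mapping failed */

	err = example_later_init(kfd);	/* hypothetical next step */
	if (err) {
		kfd_doorbell_fini(kfd);	/* unwind what succeeded */
		return err;
	}
	return 0;
}
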
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 035bbc98a63d..c74cf22a1ed9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,9 +79,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
struct kfd_process *process;
- BUG_ON(!kfd_process_wq);
-
- if (thread->mm == NULL)
+ if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
@@ -101,7 +99,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
/* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread);
if (process)
- pr_debug("kfd: process already found\n");
+ pr_debug("Process already found\n");
if (!process)
process = create_process(thread);
@@ -117,7 +115,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
struct kfd_process *process;
- if (thread->mm == NULL)
+ if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process_release_work *work;
struct kfd_process *p;
- BUG_ON(!kfd_process_wq);
-
p = container_of(rcu, struct kfd_process, rcu);
- BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
+ WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm);
@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
* mmu_notifier srcu is read locked
*/
p = container_of(mn, struct kfd_process, mmu_notifier);
- BUG_ON(p->mm != mm);
+ if (WARN_ON(p->mm != mm))
+ return;
mutex_lock(&kfd_processes_mutex);
hash_del_rcu(&p->kfd_processes);
@@ -250,7 +247,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
if (pdd->reset_wavefronts) {
- pr_warn("amdkfd: Resetting all wave fronts\n");
+ pr_warn("Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
}
@@ -407,8 +404,6 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
struct kfd_process *p;
struct kfd_process_device *pdd;
- BUG_ON(dev == NULL);
-
/*
* Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there
@@ -449,14 +444,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
mutex_unlock(&p->mutex);
}
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p)
{
return list_first_entry(&p->per_device_data,
struct kfd_process_device,
per_device_list);
}
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
struct kfd_process_device *pdd)
{
if (list_is_last(&pdd->per_device_list, &p->per_device_data))
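
The BUG_ON() removals in this file follow the general rule that a driver should not halt the whole machine on a recoverable inconsistency: impossible-parameter checks are simply dropped, and genuinely suspicious states become WARN_ON() with an early bail-out. The general form of the conversion (the function here is illustrative):

static void example_teardown(struct kfd_process *p, struct mm_struct *mm)
{
	/* Before: BUG_ON(p->mm != mm) would panic the kernel.
	 * After: log a stack trace once and keep the system running.
	 */
	if (WARN_ON(p->mm != mm))
		return;

	/* normal teardown continues here */
}
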
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 32cdf2b483db..1cae95e2b13a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -32,12 +32,9 @@ static inline struct process_queue_node *get_queue_by_qid(
{
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
- if (pqn->q && pqn->q->properties.queue_id == qid)
- return pqn;
- if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+ if ((pqn->q && pqn->q->properties.queue_id == qid) ||
+ (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
return pqn;
}
@@ -49,17 +46,13 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
{
unsigned long found;
- BUG_ON(!pqm || !qid);
-
- pr_debug("kfd: in %s\n", __func__);
-
found = find_first_zero_bit(pqm->queue_slot_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
- pr_debug("kfd: the new slot id %lu\n", found);
+ pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -72,13 +65,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
- BUG_ON(!pqm);
-
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
- if (pqm->queue_slot_bitmap == NULL)
+ if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
@@ -90,10 +81,6 @@ void pqm_uninit(struct process_queue_manager *pqm)
int retval;
struct process_queue_node *pqn, *next;
- BUG_ON(!pqm);
-
- pr_debug("In func %s\n", __func__);
-
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
retval = pqm_destroy_queue(
pqm,
@@ -102,7 +89,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqn->kq->queue->properties.queue_id);
if (retval != 0) {
- pr_err("kfd: failed to destroy queue\n");
+ pr_err("failed to destroy queue\n");
return;
}
}
@@ -117,8 +104,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
{
int retval;
- retval = 0;
-
/* Doorbell initialized in user space*/
q_properties->doorbell_ptr = NULL;
@@ -131,16 +116,13 @@ static int create_cp_queue(struct process_queue_manager *pqm,
retval = init_queue(q, q_properties);
if (retval != 0)
- goto err_init_queue;
+ return retval;
(*q)->device = dev;
(*q)->process = pqm->process;
- pr_debug("kfd: PQM After init queue");
-
- return retval;
+ pr_debug("PQM After init queue");
-err_init_queue:
return retval;
}
@@ -161,8 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int num_queues = 0;
struct queue *cur;
- BUG_ON(!pqm || !dev || !properties || !qid);
-
memset(&q_properties, 0, sizeof(struct queue_properties));
memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
@@ -185,7 +165,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
list_for_each_entry(cur, &pdd->qpd.queues_list, list)
num_queues++;
if (num_queues >= dev->device_info->max_no_of_hqd/2)
- return (-ENOSPC);
+ return -ENOSPC;
}
retval = find_available_queue_slot(pqm, qid);
@@ -197,7 +177,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
- pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+ pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
goto err_allocate_pqn;
@@ -210,7 +190,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -227,7 +207,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
break;
case KFD_QUEUE_TYPE_DIQ:
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
- if (kq == NULL) {
+ if (!kq) {
retval = -ENOMEM;
goto err_create_queue;
}
@@ -238,22 +218,22 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq, &pdd->qpd);
break;
default:
- BUG();
- break;
+ WARN(1, "Invalid queue type %d", type);
+ retval = -EINVAL;
}
if (retval != 0) {
- pr_debug("Error dqm create queue\n");
+ pr_err("DQM create queue failed\n");
goto err_create_queue;
}
- pr_debug("kfd: PQM After DQM create queue\n");
+ pr_debug("PQM After DQM create queue\n");
list_add(&pqn->process_queue_list, &pqm->queues);
if (q) {
*properties = q->properties;
- pr_debug("kfd: PQM done creating queue\n");
+ pr_debug("PQM done creating queue\n");
print_queue_properties(properties);
}
@@ -279,14 +259,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = NULL;
- BUG_ON(!pqm);
retval = 0;
- pr_debug("kfd: In Func %s\n", __func__);
-
pqn = get_queue_by_qid(pqm, qid);
- if (pqn == NULL) {
- pr_err("kfd: queue id does not match any known queue\n");
+ if (!pqn) {
+ pr_err("Queue id does not match any known queue\n");
return -EINVAL;
}
@@ -295,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->kq->dev;
if (pqn->q)
dev = pqn->q->device;
- BUG_ON(!dev);
+ if (WARN_ON(!dev))
+ return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
@@ -335,12 +313,9 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
int retval;
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
- pr_debug("amdkfd: No queue %d exists for update operation\n",
- qid);
+ pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
@@ -363,8 +338,6 @@ struct kernel_queue *pqm_get_kernel_queue(
{
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
pqn = get_queue_by_qid(pqm, qid);
if (pqn && pqn->kq)
return pqn->kq;
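
find_available_queue_slot() above is the standard bitmap allocator idiom: find_first_zero_bit() locates a free slot and set_bit() claims it, with the bitmap sized at init time via DIV_ROUND_UP(nbits, BITS_PER_BYTE) bytes. The same idiom in isolation (MAX_SLOTS is a placeholder capacity):

#include <linux/bitops.h>
#include <linux/errno.h>

#define MAX_SLOTS 1024	/* placeholder capacity */

static int alloc_slot(unsigned long *bitmap, unsigned int *slot)
{
	unsigned long found = find_first_zero_bit(bitmap, MAX_SLOTS);

	if (found >= MAX_SLOTS)
		return -ENOMEM;	/* every slot already taken */

	set_bit(found, bitmap);
	*slot = found;
	return 0;
}
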
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 0ab197077f2d..a5315d4f1c95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -65,17 +65,15 @@ void print_queue(struct queue *q)
int init_queue(struct queue **q, const struct queue_properties *properties)
{
- struct queue *tmp;
+ struct queue *tmp_q;
- BUG_ON(!q);
-
- tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
- if (!tmp)
+ tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
+ if (!tmp_q)
return -ENOMEM;
- memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
+ memcpy(&tmp_q->properties, properties, sizeof(*properties));
- *q = tmp;
+ *q = tmp_q;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 1e5064749959..19ce59028d6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -108,9 +108,6 @@ static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
- BUG_ON(!dev);
- BUG_ON(!cu);
-
dev->node_props.cpu_cores_count = cu->num_cpu_cores;
dev->node_props.cpu_core_id_base = cu->processor_id_low;
if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
@@ -123,9 +120,6 @@ static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
- BUG_ON(!dev);
- BUG_ON(!cu);
-
dev->node_props.simd_id_base = cu->processor_id_low;
dev->node_props.simd_count = cu->num_simd_cores;
dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
@@ -148,8 +142,6 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
struct kfd_topology_device *dev;
int i = 0;
- BUG_ON(!cu);
-
pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
cu->proximity_domain, cu->hsa_capability);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -177,8 +169,6 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
struct kfd_topology_device *dev;
int i = 0;
- BUG_ON(!mem);
-
pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->promixity_domain);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -223,8 +213,6 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
struct kfd_topology_device *dev;
uint32_t id;
- BUG_ON(!cache);
-
id = cache->processor_id_low;
pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
@@ -274,8 +262,6 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
uint32_t id_from;
uint32_t id_to;
- BUG_ON(!iolink);
-
id_from = iolink->proximity_domain_from;
id_to = iolink->proximity_domain_to;
@@ -323,8 +309,6 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
struct crat_subtype_iolink *iolink;
int ret = 0;
- BUG_ON(!sub_type_hdr);
-
switch (sub_type_hdr->type) {
case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
cu = (struct crat_subtype_computeunit *)sub_type_hdr;
@@ -368,8 +352,6 @@ static void kfd_release_topology_device(struct kfd_topology_device *dev)
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
- BUG_ON(!dev);
-
list_del(&dev->list);
while (dev->mem_props.next != &dev->mem_props) {
@@ -416,7 +398,7 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
struct kfd_topology_device *dev;
dev = kfd_alloc_struct(dev);
- if (dev == NULL) {
+ if (!dev) {
pr_err("No memory to allocate a topology device");
return NULL;
}
@@ -666,7 +648,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
+ pr_info_once("mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
@@ -763,8 +745,6 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
- BUG_ON(!dev);
-
if (dev->kobj_iolink) {
list_for_each_entry(iolink, &dev->io_link_props, list)
if (iolink->kobj) {
@@ -819,12 +799,12 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
int ret;
uint32_t i;
- BUG_ON(!dev);
+ if (WARN_ON(dev->kobj_node))
+ return -EEXIST;
/*
* Creating the sysfs folders
*/
- BUG_ON(dev->kobj_node);
dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
if (!dev->kobj_node)
return -ENOMEM;
@@ -957,7 +937,7 @@ static int kfd_topology_update_sysfs(void)
int ret;
pr_info("Creating topology SYSFS entries\n");
- if (sys_props.kobj_topology == NULL) {
+ if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
if (!sys_props.kobj_topology)
@@ -1117,10 +1097,8 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
- BUG_ON(!gpu);
-
list_for_each_entry(dev, &topology_device_list, list)
- if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+ if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
@@ -1143,11 +1121,9 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
struct kfd_topology_device *dev;
int res;
- BUG_ON(!gpu);
-
gpu_id = kfd_generate_gpu_id(gpu);
- pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
down_write(&topology_lock);
/*
@@ -1170,8 +1146,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* GPU vBIOS
*/
- /*
- * Update the SYSFS tree, since we added another topology device
+ /* Update the SYSFS tree, since we added another topology
+ * device
*/
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
@@ -1190,7 +1166,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
- pr_info("amdkfd: adding doorbell packet type capability\n");
+ pr_info("Adding doorbell packet type capability\n");
}
res = 0;
@@ -1210,8 +1186,6 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
uint32_t gpu_id;
int res = -ENODEV;
- BUG_ON(!gpu);
-
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 36f376677a53..94277cb734d2 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -41,6 +41,11 @@ struct kgd_dev;
struct kgd_mem;
+enum kfd_preempt_type {
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+};
+
enum kgd_memory_pool {
KGD_POOL_SYSTEM_CACHEABLE = 1,
KGD_POOL_SYSTEM_WRITECOMBINE = 2,
@@ -82,6 +87,17 @@ struct kgd2kfd_shared_resources {
size_t doorbell_start_offset;
};
+struct tile_config {
+ uint32_t *tile_config_ptr;
+ uint32_t *macro_tile_config_ptr;
+ uint32_t num_tile_configs;
+ uint32_t num_macro_tile_configs;
+
+ uint32_t gb_addr_config;
+ uint32_t num_banks;
+ uint32_t num_ranks;
+};
+
/**
* struct kfd2kgd_calls
*
@@ -123,6 +139,11 @@ struct kgd2kfd_shared_resources {
*
* @get_fw_version: Returns FW versions from the header
*
+ * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
+ * Only used for no cp scheduling mode
+ *
+ * @get_tile_config: Returns GPU-specific tiling mode information
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -153,14 +174,16 @@ struct kfd2kgd_calls {
int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
- int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
+ int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
@@ -192,6 +215,9 @@ struct kfd2kgd_calls {
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
+ void (*set_scratch_backing_va)(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+ int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
};
/**
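
The interface growth here is twofold: hqd_load() now takes the write-pointer shift/mask and the owning mm so kgd can read the user write pointer safely, and the new set_scratch_backing_va and get_tile_config hooks let amdkfd reach GPU specifics without poking amdgpu internals. A hedged sketch of calling the new tile-config hook through the function table (the surrounding function is an assumption):

static int example_query_tiling(struct kfd_dev *kfd, struct kgd_dev *kgd)
{
	struct tile_config config;
	int err;

	err = kfd->kfd2kgd->get_tile_config(kgd, &config);
	if (err)
		return err;

	pr_debug("gb_addr_config 0x%x: %u tile configs, %u banks\n",
		 config.gb_addr_config, config.num_tile_configs,
		 config.num_banks);
	return 0;
}
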
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0b74da3dca8b..bc839ff0bdd0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1240,13 +1240,18 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->sclk_dpm.soft_min_clk !=
- cz_hwmgr->sclk_dpm.soft_max_clk)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_max_clk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_max_clk,
+ PPSMC_MSG_SetSclkSoftMax));
+
return 0;
}
@@ -1292,17 +1297,55 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->sclk_dpm.soft_min_clk !=
- cz_hwmgr->sclk_dpm.soft_max_clk) {
- cz_hwmgr->sclk_dpm.soft_max_clk =
- cz_hwmgr->sclk_dpm.soft_min_clk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_min_clk,
+ PPSMC_MSG_SetSclkSoftMax));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_min_clk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ return 0;
+}
+
+static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ sclk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ sclk,
PPSMC_MSG_SetSclkSoftMax));
+ return 0;
+}
+
+static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ int32_t tmp_sclk;
+ int32_t count;
+
+ tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
+
+ for (count = table->count-1; count >= 0; count--) {
+ if (tmp_sclk >= table->entries[count].clk) {
+ tmp_sclk = table->entries[count].clk;
+ *sclk = tmp_sclk;
+ break;
+ }
}
+ if (count < 0)
+ *sclk = table->entries[0].clk;
return 0;
}
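
cz_get_profiling_clk() targets roughly 70% of the top sclk and then snaps down to the nearest DPM entry. For example, with entries {300, 600, 900, 1100} MHz the threshold is 1100 * 70 / 100 = 770; the descending scan rejects 1100 and 900 and settles on 600 MHz. If even entries[0] exceeded the threshold, the count < 0 fallback would pick entries[0] anyway.
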
@@ -1310,30 +1353,70 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
+ uint32_t sclk = 0;
int ret = 0;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (level == hwmgr->dpm_level)
+ return ret;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter profile mode, save current level, disable gfx cg*/
+ if (level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ }
+ } else {
+ /* exit profile mode, restore level, enable gfx cg*/
+ if (!(level & profile_mode_mask)) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ level = hwmgr->saved_dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ }
+ }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
ret = cz_phm_force_dpm_lowest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = cz_get_profiling_clk(hwmgr, &sclk);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ cz_phm_force_dpm_sclk(hwmgr, sclk);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ hwmgr->dpm_level = level;
break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- hwmgr->dpm_level = level;
-
return ret;
}
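
The level handling above treats the three profiling levels as a group: gfx clock gating is ungated when crossing into any profile level and re-gated when leaving, which works because the AMD_DPM_FORCED_LEVEL_* values are one-hot bits. A condensed, standalone sketch of that boundary test (stdint/stdbool stand in for kernel types):

#include <stdbool.h>
#include <stdint.h>

static bool crosses_profile_boundary(uint32_t old_level, uint32_t new_level,
				     uint32_t profile_mask)
{
	bool was_profile = old_level & profile_mask;
	bool now_profile = new_level & profile_mask;

	/* ungate gfx clock gating on entry, gate again on exit */
	return was_profile != now_profile;
}
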
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index d025653c7823..9547f265a8bb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -557,9 +557,8 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
return vddci_table->entries[i].value;
}
- PP_ASSERT_WITH_CODE(false,
- "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i-1].value);
+ pr_debug("vddci is larger than max value in vddci_table\n");
+ return vddci_table->entries[i-1].value;
}
int phm_find_boot_level(void *table,
@@ -583,26 +582,26 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk)
{
- uint8_t entryId;
- uint8_t voltageId;
+ uint8_t entry_id;
+ uint8_t voltage_id;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
+ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
+ voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
+ if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
break;
}
- PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -EINVAL;
- );
+ if (entry_id >= table_info->vdd_dep_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
+ return -EINVAL;
+ }
- *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
+ *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index cd33eb179db2..c062844b15f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -142,7 +142,7 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
}
} else if (voltage_mode == VOLTAGE_OBJ_SVID2) {
voltage_table->psi1_enable =
- voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1;
+ (voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5;
voltage_table->psi0_enable =
voltage_object->svid2_voltage_obj.psi0_enable & 0x1;
voltage_table->max_vid_step =
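
The psi1_enable fix above is a plain bit-position correction: in the SVID2 voltage object, the PSI1 flag lives at bit 5 of loadline_psi1, so extracting it is (value & 0x20) >> 5 rather than value & 0x1, which had been reading bit 0 instead. The general form for extracting bit n is (value >> n) & 0x1.
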
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 4c7f430b36eb..edc5fb6412d9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -265,6 +265,15 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
}
} */
+ if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
+ ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
+ rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
+ rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinVcn,
+ (rv_data->vclk_soft_min << 16) | rv_data->dclk_soft_min);
+ }
+
if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index afb852295a15..2472b50e54cf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -280,6 +280,8 @@ struct rv_hwmgr {
uint32_t f_actual_hard_min_freq;
uint32_t fabric_actual_soft_min_freq;
+ uint32_t vclk_soft_min;
+ uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
bool vcn_power_gated;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f01cda93f178..c2743233ba10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1962,9 +1962,6 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
break;
default:
- PP_ASSERT_WITH_CODE(0,
- "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
- );
break;
}
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 01ff5054041b..9d71a259d97d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2313,7 +2313,7 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
- vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);;
+ vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
@@ -2522,6 +2522,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
+ data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
+ data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
+
if (data->registry_data.ulv_support &&
table_info->us_ulv_voltage_offset) {
result = vega10_populate_ulv_state(hwmgr);
@@ -3701,10 +3704,22 @@ static void vega10_apply_dal_minimum_voltage_request(
return;
}
+static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
+
+ return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
+}
+
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t socclk_idx;
vega10_apply_dal_minimum_voltage_request(hwmgr);
@@ -3725,13 +3740,22 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
+ if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+ socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level),
- "Failed to set soft min mclk index!",
- return -EINVAL);
-
+ hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinSocclkByIndex,
+ socclk_idx),
+ "Failed to set soft min uclk index!",
+ return -EINVAL);
+ } else {
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinUclkByIndex,
+ data->smc_state_table.mem_boot_level),
+ "Failed to set soft min uclk index!",
+ return -EINVAL);
+ }
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
}
@@ -4138,7 +4162,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
} else {
- pr_info("Cannot find requested DCEFCLK!");
+ pr_debug("Cannot find requested DCEFCLK!");
}
if (min_clocks.memoryClock != 0) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index fbafc849ea71..e7fa67063cdc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -543,7 +543,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
/* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
@@ -556,7 +556,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
/* TD */
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
@@ -1208,7 +1208,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
if (0 != result)
return result;
- vega10_didt_set_mask(hwmgr, true);
+ vega10_didt_set_mask(hwmgr, false);
cgs_enter_safe_mode(hwmgr->device, false);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index e7ab8eb8a0cf..d44243441d28 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -321,10 +321,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- result = vega10_fan_ctrl_set_static_mode(hwmgr,
- FDO_PWM_MODE_STATIC);
- if (!result)
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = vega10_fan_ctrl_set_default_mode(hwmgr);
@@ -633,7 +630,6 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 47e57bd2c36f..91b0105e8240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -128,6 +128,8 @@ struct phm_uvd_arbiter {
uint32_t dclk;
uint32_t vclk_ceiling;
uint32_t dclk_ceiling;
+ uint32_t vclk_soft_min;
+ uint32_t dclk_soft_min;
};
struct phm_vce_arbiter {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index e0e106f1b23a..901c960cfe21 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -66,7 +66,12 @@
#define PPSMC_MSG_SetMinVddcrSocVoltage 0x22
#define PPSMC_MSG_SetMinVideoFclkFreq 0x23
#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24
-#define PPSMC_Message_Count 0x25
+#define PPSMC_MSG_ForcePowerDownGfx 0x25
+#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
+#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
+#define PPSMC_MSG_SetSoftMinVcn 0x28
+#define PPSMC_Message_Count 0x29
+
typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
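
The new PPSMC_MSG_SetSoftMinVcn message (used by the rv_hwmgr.c hunk earlier, which divides both clocks by 100 before caching them) carries both UVD clocks in its single 32-bit argument: the vclk soft minimum in the high 16 bits and the dclk soft minimum in the low 16. A minimal sketch of the packing, assuming the units are already converted:

#include <stdint.h>

static uint32_t pack_soft_min_vcn(uint16_t vclk_soft_min,
				  uint16_t dclk_soft_min)
{
	/* high half: vclk, low half: dclk */
	return ((uint32_t)vclk_soft_min << 16) | dclk_soft_min;
}
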
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index e3c13aa202b8..289eda54e5aa 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -31,7 +31,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
-static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
+static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 3022b39c00f3..69dab82a3771 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 9052ebeae8d0..0cd827e11fa2 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -266,7 +266,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
- drm_gem_object_unreference_unlocked(afb->obj);
+ drm_gem_object_put_unlocked(afb->obj);
afb->obj = NULL;
}
drm_fb_helper_fini(&afbdev->helper);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9a44cdec3bca..dac355812adc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -387,7 +387,7 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
- drm_gem_object_unreference_unlocked(ast_fb->obj);
+ drm_gem_object_put_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(ast_fb);
}
@@ -429,13 +429,13 @@ ast_user_framebuffer_create(struct drm_device *dev,
ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
if (!ast_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(ast_fb);
return ERR_PTR(ret);
}
@@ -628,7 +628,7 @@ int ast_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -676,7 +676,7 @@ ast_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_ast_bo(obj);
*offset = ast_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 43245229f437..6f3849ec0c1d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -950,7 +950,7 @@ static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ttm_bo_kunmap(&ast->cache_kmap);
- drm_gem_object_unreference_unlocked(ast->cursor_cache);
+ drm_gem_object_put_unlocked(ast->cursor_cache);
}
int ast_mode_init(struct drm_device *dev)
@@ -1215,10 +1215,10 @@ static int ast_cursor_set(struct drm_crtc *crtc,
ast_show_cursor(crtc);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index a1d28845da5f..7b20318483e4 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -93,7 +93,6 @@ static struct drm_driver bochs_driver = {
.gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 682c090fa3ed..b2431aee7887 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -785,7 +785,7 @@ adv7511_connector_detect(struct drm_connector *connector, bool force)
return adv7511_detect(adv, connector);
}
-static struct drm_connector_funcs adv7511_connector_funcs = {
+static const struct drm_connector_funcs adv7511_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = adv7511_connector_detect,
.destroy = drm_connector_cleanup,
@@ -856,7 +856,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
return ret;
}
-static struct drm_bridge_funcs adv7511_bridge_funcs = {
+static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.enable = adv7511_bridge_enable,
.disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 8f2d1379c880..cf3f0caf9c63 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -517,7 +517,7 @@ static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, dw->buf_offset);
}
-static struct snd_pcm_ops snd_dw_hdmi_ops = {
+static const struct snd_pcm_ops snd_dw_hdmi_ops = {
.open = dw_hdmi_open,
.close = dw_hdmi_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 36f5ccbd1794..63c7a01b7053 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -811,7 +811,7 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
}
-static struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
+static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
.mode_set = dw_mipi_dsi_bridge_mode_set,
.enable = dw_mipi_dsi_bridge_enable,
.post_disable = dw_mipi_dsi_bridge_post_disable,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 503252d6a74d..8571cfd877c5 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1254,7 +1254,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
/* Shut down GPIO is optional */
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 910c300f5c37..69c4e352dd78 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
static const struct dev_pm_ops cirrus_pm_ops = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 0f6815f35ad2..32fbfba2c623 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -251,7 +251,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&gfbdev->helper);
if (gfb->obj) {
- drm_gem_object_unreference_unlocked(gfb->obj);
+ drm_gem_object_put_unlocked(gfb->obj);
gfb->obj = NULL;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e7fc95f63dca..b5f528543956 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -18,7 +18,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
- drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_gem_object_put_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -67,13 +67,13 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
if (!cirrus_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(cirrus_fb);
return ERR_PTR(ret);
}
@@ -261,7 +261,7 @@ int cirrus_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -310,7 +310,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_cirrus_bo(obj);
*offset = cirrus_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 213fb837e1c4..08af8d6b844b 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
DP_DETAILED_CAP_INFO_AVAILABLE;
int clk;
int bpc;
- char id[6];
+ char id[7];
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
@@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_puts(m, "\t\tType: N/A\n");
}
+ memset(id, 0, sizeof(id));
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
@@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
if (len > 0)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
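
The id buffer change above fixes an off-by-one: drm_dp_downstream_id() reads exactly six DPCD bytes and never NUL-terminates, so printing the result with %s needs a seventh byte plus the memset to guarantee termination (the companion &rev -> rev change passes a pointer to the first byte instead of a pointer to the whole array). The safe pattern in isolation:

static void show_branch_id(struct seq_file *m, struct drm_dp_aux *aux)
{
	char id[7];			/* 6 DPCD bytes + NUL */

	memset(id, 0, sizeof(id));	/* ensures %s stops in-bounds */
	drm_dp_downstream_id(aux, id);	/* fills at most 6 bytes */
	seq_printf(m, "\t\tID: %s\n", id);
}
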
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 2ed2d919beae..be38ac7050d4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -291,7 +291,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
if (!minor) {
return ERR_PTR(-ENODEV);
- } else if (drm_device_is_unplugged(minor->dev)) {
+ } else if (drm_dev_is_unplugged(minor->dev)) {
drm_dev_unref(minor->dev);
return ERR_PTR(-ENODEV);
}
@@ -364,26 +364,32 @@ void drm_put_dev(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_put_dev);
-void drm_unplug_dev(struct drm_device *dev)
+static void drm_device_set_unplugged(struct drm_device *dev)
{
- /* for a USB device */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_unregister_all(dev);
+ smp_wmb();
+ atomic_set(&dev->unplugged, 1);
+}
- drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+/**
+ * drm_dev_unplug - unplug a DRM device
+ * @dev: DRM device
+ *
+ * This unplugs a hotpluggable DRM device, which makes it inaccessible to
+ * userspace operations. Entry-points can use drm_dev_is_unplugged(). This
+ * essentially unregisters the device like drm_dev_unregister(), but can be
+ * called while there are still open users of @dev.
+ */
+void drm_dev_unplug(struct drm_device *dev)
+{
+ drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
-
drm_device_set_unplugged(dev);
-
- if (dev->open_count == 0) {
- drm_put_dev(dev);
- }
+ if (dev->open_count == 0)
+ drm_dev_unref(dev);
mutex_unlock(&drm_global_mutex);
}
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_unplug);
/*
* DRM internal mount
@@ -835,6 +841,9 @@ EXPORT_SYMBOL(drm_dev_register);
* drm_dev_register() but does not deallocate the device. The caller must call
* drm_dev_unref() to drop their final reference.
*
+ * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
+ * which can be called while there are still open users of @dev.
+ *
* This should be called first in the device teardown code to make sure
* userspace can't access the device instance any more.
*/
@@ -842,7 +851,8 @@ void drm_dev_unregister(struct drm_device *dev)
{
struct drm_map_list *r_list, *list_temp;
- drm_lastclose(dev);
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ drm_lastclose(dev);
dev->registered = false;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index ade319d10e70..f2ee88363015 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,27 +18,17 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
-struct drm_fb_cma {
- struct drm_framebuffer fb;
- struct drm_gem_cma_object *obj[4];
-};
-
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
- struct drm_fb_cma *fb;
const struct drm_framebuffer_funcs *fb_funcs;
};
@@ -90,69 +80,19 @@ static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
-static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct drm_fb_cma, fb);
-}
-
void drm_fb_cma_destroy(struct drm_framebuffer *fb)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i;
-
- for (i = 0; i < 4; i++) {
- if (fb_cma->obj[i])
- drm_gem_object_put_unlocked(&fb_cma->obj[i]->base);
- }
-
- drm_framebuffer_cleanup(fb);
- kfree(fb_cma);
+ drm_gem_fb_destroy(fb);
}
EXPORT_SYMBOL(drm_fb_cma_destroy);
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int *handle)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
-
- return drm_gem_handle_create(file_priv,
- &fb_cma->obj[0]->base, handle);
+ return drm_gem_fb_create_handle(fb, file_priv, handle);
}
EXPORT_SYMBOL(drm_fb_cma_create_handle);
-static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
-};
-
-static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_cma_object **obj,
- unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
-{
- struct drm_fb_cma *fb_cma;
- int ret;
- int i;
-
- fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
- if (!fb_cma)
- return ERR_PTR(-ENOMEM);
-
- drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb_cma->obj[i] = obj[i];
-
- ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
- if (ret) {
- dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
- kfree(fb_cma);
- return ERR_PTR(ret);
- }
-
- return fb_cma;
-}
-
/**
* drm_fb_cma_create_with_funcs() - helper function for the
* &drm_mode_config_funcs.fb_create
@@ -170,53 +110,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
- struct drm_fb_cma *fb_cma;
- struct drm_gem_cma_object *objs[4];
- struct drm_gem_object *obj;
- int ret;
- int i;
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-EINVAL);
-
- for (i = 0; i < info->num_planes; i++) {
- unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
- unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
- unsigned int min_size;
-
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
- dev_err(dev->dev, "Failed to lookup GEM object\n");
- ret = -ENOENT;
- goto err_gem_object_put;
- }
-
- min_size = (height - 1) * mode_cmd->pitches[i]
- + width * info->cpp[i]
- + mode_cmd->offsets[i];
-
- if (obj->size < min_size) {
- drm_gem_object_put_unlocked(obj);
- ret = -EINVAL;
- goto err_gem_object_put;
- }
- objs[i] = to_drm_gem_cma_obj(obj);
- }
-
- fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
- if (IS_ERR(fb_cma)) {
- ret = PTR_ERR(fb_cma);
- goto err_gem_object_put;
- }
-
- return &fb_cma->fb;
-
-err_gem_object_put:
- for (i--; i >= 0; i--)
- drm_gem_object_put_unlocked(&objs[i]->base);
- return ERR_PTR(ret);
+ return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, funcs);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
@@ -233,8 +127,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
- &drm_fb_cma_funcs);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);
@@ -250,12 +143,13 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ struct drm_gem_object *gem;
- if (plane >= 4)
+ gem = drm_gem_fb_get_obj(fb, plane);
+ if (!gem)
return NULL;
- return fb_cma->obj[plane];
+ return to_drm_gem_cma_obj(gem);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
@@ -272,13 +166,14 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ struct drm_gem_cma_object *obj;
dma_addr_t paddr;
- if (plane >= 4)
+ obj = drm_fb_cma_get_gem_obj(fb, plane);
+ if (!obj)
return 0;
- paddr = fb_cma->obj[plane]->paddr + fb->offsets[plane];
+ paddr = obj->paddr + fb->offsets[plane];
paddr += fb->format->cpp[plane] * (state->src_x >> 16);
paddr += fb->pitches[plane] * (state->src_y >> 16);
@@ -302,26 +197,13 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
int drm_fb_cma_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dma_buf *dma_buf;
- struct dma_fence *fence;
-
- if ((plane->state->fb == state->fb) || !state->fb)
- return 0;
-
- dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
- if (dma_buf) {
- fence = reservation_object_get_excl_rcu(dma_buf->resv);
- drm_atomic_set_fence_for_plane(state, fence);
- }
-
- return 0;
+ return drm_gem_fb_prepare_fb(plane, state);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
@@ -330,7 +212,7 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < fb->format->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- drm_gem_cma_describe(fb_cma->obj[i], m);
+ drm_gem_cma_describe(drm_fb_cma_get_gem_obj(fb, i), m);
}
}
@@ -431,7 +313,6 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_device *dev = helper->dev;
struct drm_gem_cma_object *obj;
struct drm_framebuffer *fb;
@@ -446,14 +327,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
sizes->surface_bpp);
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
obj = drm_gem_cma_create(dev, size);
if (IS_ERR(obj))
return -ENOMEM;
@@ -464,15 +338,14 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
goto err_gem_free_object;
}
- fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1,
- fbdev_cma->fb_funcs);
- if (IS_ERR(fbdev_cma->fb)) {
+ fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
+ fbdev_cma->fb_funcs);
+ if (IS_ERR(fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(fbdev_cma->fb);
+ ret = PTR_ERR(fb);
goto err_fb_info_destroy;
}
- fb = &fbdev_cma->fb->fb;
helper->fb = fb;
fbi->par = helper;
@@ -500,7 +373,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
err_cma_destroy:
- drm_framebuffer_remove(&fbdev_cma->fb->fb);
+ drm_framebuffer_remove(fb);
err_fb_info_destroy:
drm_fb_helper_fini(helper);
err_gem_free_object:
@@ -570,6 +443,11 @@ err_free:
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
/**
* drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
@@ -597,8 +475,8 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
if (fbdev_cma->fb_helper.fbdev)
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
- if (fbdev_cma->fb)
- drm_framebuffer_remove(&fbdev_cma->fb->fb);
+ if (fbdev_cma->fb_helper.fb)
+ drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 59b75a974357..b3c6e997ccdb 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -436,7 +436,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (!--dev->open_count) {
drm_lastclose(dev);
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
drm_put_dev(dev);
}
mutex_unlock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a8d396bed6a4..ad4e9cfe48a2 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1001,7 +1001,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
int ret;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 275ab872b34f..373e33f22be4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -264,41 +264,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
-/**
- * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
- * object
- * @file_priv: DRM file-private structure containing the GEM object
- * @drm: DRM device
- * @handle: GEM object handle
- * @offset: return location for the fake mmap offset
- *
- * This function look up an object by its handle and returns the fake mmap
- * offset associated with it. Drivers using the CMA helpers should set this
- * as their &drm_driver.dumb_map_offset callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, u32 handle,
- u64 *offset)
-{
- struct drm_gem_object *gem_obj;
-
- gem_obj = drm_gem_object_lookup(file_priv, handle);
- if (!gem_obj) {
- dev_err(drm->dev, "failed to lookup GEM object\n");
- return -EINVAL;
- }
-
- *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
-
- drm_gem_object_put_unlocked(gem_obj);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
-
const struct vm_operations_struct drm_gem_cma_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -390,7 +355,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
struct drm_device *dev = priv->minor->dev;
struct drm_vma_offset_node *node;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
new file mode 100644
index 000000000000..d54a083dc5dd
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -0,0 +1,283 @@
+/*
+ * drm gem framebuffer helper functions
+ *
+ * Copyright (C) 2017 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for drivers that don't subclass
+ * &drm_framebuffer and use &drm_gem_object for their backing storage.
+ *
+ * Drivers with no additional framebuffer validation needs can simply use
+ * drm_gem_fb_create() and everything is wired up automatically, but all of
+ * the parts can also be used individually.
+ */
+
+/**
+ * drm_gem_fb_get_obj() - Get GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Returns the GEM object for given framebuffer.
+ */
+struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ if (plane >= 4)
+ return NULL;
+
+ return fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
+
+static struct drm_framebuffer *
+drm_gem_fb_alloc(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, fb, funcs);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n",
+ ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * drm_gem_fb_destroy - Free GEM backed framebuffer
+ * @fb: DRM framebuffer
+ *
+ * Frees a GEM backed framebuffer with its backing buffer(s) and the structure
+ * itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
+ * callback.
+ */
+void drm_gem_fb_destroy(struct drm_framebuffer *fb)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ drm_gem_object_put_unlocked(fb->obj[i]);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+EXPORT_SYMBOL(drm_gem_fb_destroy);
+
+/**
+ * drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
+ * @fb: DRM framebuffer
+ * @file: drm file
+ * @handle: handle created
+ *
+ * Drivers can use this as their &drm_framebuffer_funcs->create_handle
+ * callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int *handle)
+{
+ return drm_gem_handle_create(file, fb->obj[0], handle);
+}
+EXPORT_SYMBOL(drm_gem_fb_create_handle);
+
+/**
+ * drm_gem_fb_create_with_funcs() - helper function for the
+ * &drm_mode_config_funcs.fb_create
+ * callback
+ * @dev: DRM device
+ * @file: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This can be used to set &drm_framebuffer_funcs for drivers that need the
+ * &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
+ * need to change &drm_framebuffer_funcs.
+ * The function does buffer size validation.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ const struct drm_format_info *info;
+ struct drm_gem_object *objs[4];
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
+ unsigned int min_size;
+
+ objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+ if (!objs[i]) {
+ DRM_DEV_ERROR(dev->dev, "Failed to lookup GEM\n");
+ ret = -ENOENT;
+ goto err_gem_object_put;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * info->cpp[i]
+ + mode_cmd->offsets[i];
+
+ if (objs[i]->size < min_size) {
+ drm_gem_object_put_unlocked(objs[i]);
+ ret = -EINVAL;
+ goto err_gem_object_put;
+ }
+ }
+
+ fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_put;
+ }
+
+ return fb;
+
+err_gem_object_put:
+ for (i--; i >= 0; i--)
+ drm_gem_object_put_unlocked(objs[i]);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
+
+static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+/**
+ * drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function
+ * @dev: DRM device
+ * @file: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * If your hardware has special alignment or pitch requirements, these should
+ * be checked before calling this function. The function does buffer size
+ * validation. Use drm_gem_fb_create_with_funcs() if you need to set
+ * &drm_framebuffer_funcs.dirty.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ &drm_gem_fb_funcs);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create);
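A minimal sketch of wiring the new helper into a driver's mode config, assuming the driver needs no extra framebuffer validation; my_mode_config_funcs is a hypothetical name:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_mode_config_funcs my_mode_config_funcs = {
	/* drm_gem_fb_create() validates buffer sizes and builds the fb. */
	.fb_create = drm_gem_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};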
+
+/**
+ * drm_gem_fb_prepare_fb() - Prepare gem framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This can be used as the &drm_plane_helper_funcs.prepare_fb hook.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to plane state for the atomic helper
+ * to wait on.
+ *
+ * There is no need for a &drm_plane_helper_funcs.cleanup_fb hook for simple
+ * GEM-based framebuffer drivers whose buffers are always pinned in memory.
+ */
+int drm_gem_fb_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
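And a corresponding sketch for the plane side, assuming always-pinned GEM buffers so no cleanup_fb is needed; the my_plane_atomic_* hooks are hypothetical:

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	/* Attaches the dma-buf's exclusive fence to the plane state. */
	.prepare_fb = drm_gem_fb_prepare_fb,
	.atomic_check = my_plane_atomic_check,
	.atomic_update = my_plane_atomic_update,
};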
+
+/**
+ * drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation
+ * @dev: DRM device
+ * @sizes: fbdev size description
+ * @pitch_align: optional pitch alignment
+ * @obj: GEM object backing the framebuffer
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function creates a framebuffer for use with fbdev emulation.
+ *
+ * Returns:
+ * Pointer to a drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fbdev_fb_create(struct drm_device *dev,
+ struct drm_fb_helper_surface_size *sizes,
+ unsigned int pitch_align, struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8);
+ if (pitch_align)
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
+ pitch_align);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
+}
+EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
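A hedged sketch of calling drm_gem_fbdev_fb_create() from a &drm_fb_helper_funcs.fb_probe implementation; my_gem_create() and my_fb_funcs are hypothetical stand-ins for a driver's allocator and framebuffer funcs:

static int my_fbdev_probe(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	size_t size = sizes->surface_width * sizes->surface_height *
		      DIV_ROUND_UP(sizes->surface_bpp, 8);
	struct drm_gem_object *obj = my_gem_create(helper->dev, size);
	struct drm_framebuffer *fb;

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* No special pitch alignment requirement, hence pitch_align = 0. */
	fb = drm_gem_fbdev_fb_create(helper->dev, sizes, 0, obj,
				     &my_fb_funcs);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	helper->fb = fb;
	return 0;
}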
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 4e906b82a170..fbc3f308fa19 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -167,3 +167,9 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8bfeb32f8a10..a9ae6dd2d593 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -657,6 +657,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -716,7 +722,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
struct drm_device *dev = file_priv->minor->dev;
int retcode;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
retcode = drm_ioctl_permit(flags, file_priv);
@@ -765,7 +771,7 @@ long drm_ioctl(struct file *filp,
dev = file_priv->minor->dev;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5c14beee52ff..85ab1eec73e5 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -126,7 +126,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
plane->format_types[j],
plane->modifiers[i])) {
- mod->formats |= 1 << j;
+ mod->formats |= 1ULL << j;
}
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index a5b38a80a99a..0422b8c2c2e7 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1,5 +1,7 @@
/*
* Copyright 2017 Red Hat
+ * Parts ported from amdgpu (fence wait code).
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,6 +33,9 @@
* that contain an optional fence. The fence can be updated with a new
* fence, or be NULL.
*
+ * syncobj's can be waited upon, in which case the wait blocks on the
+ * underlying fence.
+ *
 * syncobj's can be exported to fd's and back; these fd's are opaque and
* have no other use case, except passing the syncobj between processes.
*
@@ -46,6 +51,7 @@
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
+#include <linux/sched/signal.h>
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
@@ -75,6 +81,75 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
}
EXPORT_SYMBOL(drm_syncobj_find);
+static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ cb->func = func;
+ list_add_tail(&cb->node, &syncobj->cb_list);
+}
+
+static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
+ struct dma_fence **fence,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ int ret;
+
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (*fence)
+ return 1;
+
+ spin_lock(&syncobj->lock);
+ /* We've already tried once to get a fence and failed. Now that we
+ * have the lock, try one more time just to be sure we don't add a
+ * callback when a fence has already been set.
+ */
+ if (syncobj->fence) {
+ *fence = dma_fence_get(syncobj->fence);
+ ret = 1;
+ } else {
+ *fence = NULL;
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ ret = 0;
+ }
+ spin_unlock(&syncobj->lock);
+
+ return ret;
+}
+
+/**
+ * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
+ * @syncobj: Sync object to which to add the callback
+ * @cb: Callback to add
+ * @func: Func to use when initializing the drm_syncobj_cb struct
+ *
+ * This adds a callback to be called the next time the fence is replaced.
+ */
+void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ spin_lock(&syncobj->lock);
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_add_callback);
+
+/**
+ * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
+ * @syncobj: Sync object from which to remove the callback
+ * @cb: Callback to remove
+ */
+void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ spin_lock(&syncobj->lock);
+ list_del_init(&cb->node);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_remove_callback);
+
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
@@ -86,18 +161,75 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
+ struct drm_syncobj_cb *cur, *tmp;
if (fence)
dma_fence_get(fence);
- old_fence = xchg(&syncobj->fence, fence);
+
+ spin_lock(&syncobj->lock);
+
+ old_fence = syncobj->fence;
+ syncobj->fence = fence;
+
+ if (fence != old_fence) {
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
+ list_del_init(&cur->node);
+ cur->func(syncobj, cur);
+ }
+ }
+
+ spin_unlock(&syncobj->lock);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-int drm_syncobj_fence_get(struct drm_file *file_private,
- u32 handle,
- struct dma_fence **fence)
+struct drm_syncobj_null_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
+static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
+{
+ return "syncobjnull";
+}
+
+static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
+{
+ dma_fence_enable_sw_signaling(fence);
+ return !dma_fence_is_signaled(fence);
+}
+
+static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
+ .get_driver_name = drm_syncobj_null_fence_get_name,
+ .get_timeline_name = drm_syncobj_null_fence_get_name,
+ .enable_signaling = drm_syncobj_null_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = NULL,
+};
+
+static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+{
+ struct drm_syncobj_null_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
+ &fence->lock, 0, 0);
+ dma_fence_signal(&fence->base);
+
+ drm_syncobj_replace_fence(syncobj, &fence->base);
+
+ dma_fence_put(&fence->base);
+
+ return 0;
+}
+
+int drm_syncobj_find_fence(struct drm_file *file_private,
+ u32 handle,
+ struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret = 0;
@@ -105,14 +237,14 @@ int drm_syncobj_fence_get(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
- *fence = dma_fence_get(syncobj->fence);
+ *fence = drm_syncobj_fence_get(syncobj);
if (!*fence) {
ret = -EINVAL;
}
drm_syncobj_put(syncobj);
return ret;
}
-EXPORT_SYMBOL(drm_syncobj_fence_get);
+EXPORT_SYMBOL(drm_syncobj_find_fence);
/**
* drm_syncobj_free - free a sync object.
@@ -125,13 +257,13 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- dma_fence_put(syncobj->fence);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
static int drm_syncobj_create(struct drm_file *file_private,
- u32 *handle)
+ u32 *handle, uint32_t flags)
{
int ret;
struct drm_syncobj *syncobj;
@@ -141,6 +273,16 @@ static int drm_syncobj_create(struct drm_file *file_private,
return -ENOMEM;
kref_init(&syncobj->refcount);
+ INIT_LIST_HEAD(&syncobj->cb_list);
+ spin_lock_init(&syncobj->lock);
+
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
+ ret = drm_syncobj_assign_null_handle(syncobj);
+ if (ret < 0) {
+ drm_syncobj_put(syncobj);
+ return ret;
+ }
+ }
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
@@ -307,7 +449,7 @@ int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_fence_get(file_private, handle, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, &fence);
if (ret)
goto err_put_fd;
@@ -377,11 +519,11 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
/* no valid flags yet */
- if (args->flags)
+ if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
return drm_syncobj_create(file_private,
- &args->handle);
+ &args->handle, args->flags);
}
int
@@ -447,3 +589,368 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
}
+
+struct syncobj_wait_entry {
+ struct task_struct *task;
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+ struct drm_syncobj_cb syncobj_cb;
+};
+
+static void syncobj_wait_fence_func(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, fence_cb);
+
+ wake_up_process(wait->task);
+}
+
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, syncobj_cb);
+
+ /* This happens inside the syncobj lock */
+ wait->fence = dma_fence_get(syncobj->fence);
+ wake_up_process(wait->task);
+}
+
+static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ uint32_t count,
+ uint32_t flags,
+ signed long timeout,
+ uint32_t *idx)
+{
+ struct syncobj_wait_entry *entries;
+ struct dma_fence *fence;
+ signed long ret;
+ uint32_t signaled_count, i;
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ /* Walk the list of sync objects and initialize entries. We do
+ * this up-front so that we can properly return -EINVAL if there is
+ * a syncobj with a missing fence and then never have the chance of
+ * returning -EINVAL again.
+ */
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ entries[i].task = current;
+ entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!entries[i].fence) {
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ continue;
+ } else {
+ ret = -EINVAL;
+ goto cleanup_entries;
+ }
+ }
+
+ if (dma_fence_is_signaled(entries[i].fence)) {
+ if (signaled_count == 0 && idx)
+ *idx = i;
+ signaled_count++;
+ }
+ }
+
+ /* Initialize ret to the max of timeout and 1. That way, the
+ * default return value indicates a successful wait and not a
+ * timeout.
+ */
+ ret = max_t(signed long, timeout, 1);
+
+ if (signaled_count == count ||
+ (signaled_count > 0 &&
+ !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
+ goto cleanup_entries;
+
+ /* There's a very annoying laxness in the dma_fence API here, in
+ * that backends are not required to automatically report when a
+ * fence is signaled prior to fence->ops->enable_signaling() being
+ * called. So here if we fail to match signaled_count, we need to
+ * fall through and try a 0 timeout wait!
+ */
+
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ for (i = 0; i < count; ++i) {
+ drm_syncobj_fence_get_or_add_callback(syncobjs[i],
+ &entries[i].fence,
+ &entries[i].syncobj_cb,
+ syncobj_wait_syncobj_func);
+ }
+ }
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+
+ if (dma_fence_is_signaled(fence) ||
+ (!entries[i].fence_cb.func &&
+ dma_fence_add_callback(fence,
+ &entries[i].fence_cb,
+ syncobj_wait_fence_func))) {
+ /* The fence has been signaled */
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
+ signaled_count++;
+ } else {
+ if (idx)
+ *idx = i;
+ goto done_waiting;
+ }
+ }
+ }
+
+ if (signaled_count == count)
+ goto done_waiting;
+
+ if (timeout == 0) {
+ /* If we are doing a 0 timeout wait and we got
+ * here, then we just timed out.
+ */
+ ret = 0;
+ goto done_waiting;
+ }
+
+ ret = schedule_timeout(ret);
+
+ if (ret > 0 && signal_pending(current))
+ ret = -ERESTARTSYS;
+ } while (ret > 0);
+
+done_waiting:
+ __set_current_state(TASK_RUNNING);
+
+cleanup_entries:
+ for (i = 0; i < count; ++i) {
+ if (entries[i].syncobj_cb.func)
+ drm_syncobj_remove_callback(syncobjs[i],
+ &entries[i].syncobj_cb);
+ if (entries[i].fence_cb.func)
+ dma_fence_remove_callback(entries[i].fence,
+ &entries[i].fence_cb);
+ dma_fence_put(entries[i].fence);
+ }
+ kfree(entries);
+
+ return ret;
+}
+
+/**
+ * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
+ *
+ * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
+ *
+ * Calculate the timeout in jiffies from an absolute timeout in nanoseconds.
+ */
+static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+{
+ ktime_t abs_timeout, now;
+ u64 timeout_ns, timeout_jiffies64;
+
+ /* a timeout of 0 means poll - absolute 0 doesn't seem valid */
+ if (timeout_nsec == 0)
+ return 0;
+
+ abs_timeout = ns_to_ktime(timeout_nsec);
+ now = ktime_get();
+
+ if (!ktime_after(abs_timeout, now))
+ return 0;
+
+ timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
+
+ timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
+ /* clamp timeout to avoid infinite timeout */
+ if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
+ return MAX_SCHEDULE_TIMEOUT - 1;
+
+ return timeout_jiffies64 + 1;
+}
+
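The wait ioctl below takes an absolute CLOCK_MONOTONIC timeout (ktime_get() time base), matching this jiffies conversion. A userspace sketch for building that value from a relative delay; this helper is hypothetical, not part of libdrm:

#include <stdint.h>
#include <time.h>

static int64_t abs_timeout_ns(int64_t relative_ns)
{
	struct timespec ts;

	/* Same clock the kernel compares against via ktime_get(). */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec + relative_ns;
}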
+static int drm_syncobj_array_wait(struct drm_device *dev,
+ struct drm_file *file_private,
+ struct drm_syncobj_wait *wait,
+ struct drm_syncobj **syncobjs)
+{
+ signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long ret = 0;
+ uint32_t first = ~0;
+
+ ret = drm_syncobj_array_wait_timeout(syncobjs,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (ret < 0)
+ return ret;
+
+ wait->first_signaled = first;
+ if (ret == 0)
+ return -ETIME;
+ return 0;
+}
+
+static int drm_syncobj_array_find(struct drm_file *file_private,
+ void *user_handles, uint32_t count_handles,
+ struct drm_syncobj ***syncobjs_out)
+{
+ uint32_t i, *handles;
+ struct drm_syncobj **syncobjs;
+ int ret;
+
+ handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
+ if (handles == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(handles, user_handles,
+ sizeof(uint32_t) * count_handles)) {
+ ret = -EFAULT;
+ goto err_free_handles;
+ }
+
+ syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
+ if (syncobjs == NULL) {
+ ret = -ENOMEM;
+ goto err_free_handles;
+ }
+
+ for (i = 0; i < count_handles; i++) {
+ syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
+ if (!syncobjs[i]) {
+ ret = -ENOENT;
+ goto err_put_syncobjs;
+ }
+ }
+
+ kfree(handles);
+ *syncobjs_out = syncobjs;
+ return 0;
+
+err_put_syncobjs:
+ while (i-- > 0)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+err_free_handles:
+ kfree(handles);
+
+ return ret;
+}
+
+static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
+ uint32_t count)
+{
+ uint32_t i;
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+}
+
+int
+drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ args, syncobjs);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return 0;
+}
+
+int
+drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ ret = drm_syncobj_assign_null_handle(syncobjs[i]);
+ if (ret < 0)
+ break;
+ }
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
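A userspace sketch of driving the new wait ioctl directly; fd is an open DRM device file descriptor and the handles come from syncobj creation, with error handling elided. This is an illustration of the uapi, not code from this patch:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/drm.h>

static int syncobj_wait_all(int fd, const uint32_t *handles,
			    uint32_t count, int64_t abs_timeout_ns)
{
	struct drm_syncobj_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)handles;
	wait.count_handles = count;
	wait.timeout_nsec = abs_timeout_ns;
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

	/* Returns 0 when all fences signal; -1 with errno ETIME on timeout. */
	return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}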
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1170b3209a12..13a59ed2afbc 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -631,7 +631,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
int ret;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 71cee4e9fefb..38b477b5fbf9 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -10,6 +10,8 @@ config DRM_ETNAVIV
select IOMMU_API
select IOMMU_SUPPORT
select WANT_DEV_COREDUMP
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
help
DRM driver for Vivante GPUs.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 91e17aeee1da..2cb4773823c2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -316,7 +316,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -337,7 +337,7 @@ static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_fini(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -357,7 +357,7 @@ static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = etnaviv_gem_mmap_offset(obj, &args->offset);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -446,7 +446,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 9a3bea738330..5a634594a6ce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -68,7 +68,7 @@ static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+ dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
return PTR_ERR(p);
}
@@ -265,7 +265,7 @@ void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
mutex_lock(&etnaviv_obj->lock);
WARN_ON(mapping->use == 0);
@@ -282,7 +282,7 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
mapping->use -= 1;
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
@@ -358,7 +358,7 @@ out:
return ERR_PTR(ret);
/* Take a reference on the object */
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return mapping;
}
@@ -413,6 +413,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
bool write = !!(op & ETNA_PREP_WRITE);
int ret;
+ if (!etnaviv_obj->sgt) {
+ void *ret;
+
+ mutex_lock(&etnaviv_obj->lock);
+ ret = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
if (op & ETNA_PREP_NOSYNC) {
if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
write))
@@ -427,16 +437,6 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- if (!etnaviv_obj->sgt) {
- void *ret;
-
- mutex_lock(&etnaviv_obj->lock);
- ret = etnaviv_gem_get_pages(etnaviv_obj);
- mutex_unlock(&etnaviv_obj->lock);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- }
-
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
etnaviv_op_to_dma_dir(op));
@@ -662,7 +662,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
* going to pin these pages.
*/
mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
if (ret)
@@ -671,7 +672,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -688,14 +689,14 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -712,7 +713,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -800,7 +801,7 @@ static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
}
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
mmput(work->mm);
put_task_struct(work->task);
@@ -858,7 +859,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
get_task_struct(current);
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
work->mm = mm;
work->task = current;
@@ -924,6 +925,6 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index e5da4f2300ba..ae884723e9b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -146,7 +146,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
return &etnaviv_obj->base;
fail:
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..a7ff2e4c00d2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -88,7 +88,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
* Take a refcount on the object. The file table lock
* prevents the object_idr's refcount on this being dropped.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = to_etnaviv_bo(obj);
}
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
if (ret)
return ret;
- if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
- DRM_ERROR("relocation %u outside object", i);
+ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+ DRM_ERROR("relocation %u outside object\n", i);
return -EINVAL;
}
@@ -291,7 +291,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit_unlock_object(submit, i);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
ww_acquire_fini(&submit->ticket);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ada45fdd0eae..fc9a6a83dfc7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1622,10 +1622,12 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
- gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ if (IS_ENABLED(CONFIG_THERMAL)) {
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
(char *)dev_name(dev), gpu, &cooling_ops);
- if (IS_ERR(gpu->cooling))
- return PTR_ERR(gpu->cooling);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+ }
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d185347c64c..305dc3d4ff77 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -75,6 +75,7 @@ config DRM_EXYNOS_DP
config DRM_EXYNOS_HDMI
bool "HDMI"
depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you want to use Exynos HDMI for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5792ca88ab7a..730b8d9db187 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -33,9 +34,8 @@
#define WINDOWS_NR 3
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
-#define IFTYPE_I80 (1 << 0)
-#define I80_HW_TRG (1 << 1)
-#define IFTYPE_HDMI (1 << 2)
+#define I80_HW_TRG (1 << 0)
+#define IFTYPE_HDMI (1 << 1)
static const char * const decon_clks_name[] = {
"pclk",
@@ -57,6 +57,8 @@ struct decon_context {
struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
unsigned int irq;
+ unsigned int irq_vsync;
+ unsigned int irq_lcd_sys;
unsigned int te_irq;
unsigned long out_type;
int first_win;
@@ -90,7 +92,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
u32 val;
val = VIDINTCON0_INTEN;
- if (ctx->out_type & IFTYPE_I80)
+ if (crtc->i80_mode)
val |= VIDINTCON0_FRAMEDONE;
else
val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
@@ -139,7 +141,7 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
case VIDCON1_VSTATUS_VS:
- if (!(ctx->out_type & IFTYPE_I80))
+ if (!(ctx->crtc->i80_mode))
--frm;
break;
case VIDCON1_VSTATUS_BP:
@@ -166,7 +168,7 @@ static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
static void decon_setup_trigger(struct decon_context *ctx)
{
- if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+ if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
return;
if (!(ctx->out_type & I80_HW_TRG)) {
@@ -206,7 +208,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
val = VIDOUT_LCD_ON;
if (interlaced)
val |= VIDOUT_INTERLACE_EN_F;
- if (ctx->out_type & IFTYPE_I80) {
+ if (crtc->i80_mode) {
val |= VIDOUT_COMMAND_IF;
} else {
val |= VIDOUT_RGB_IF;
@@ -222,7 +224,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
- if (!(ctx->out_type & IFTYPE_I80)) {
+ if (!crtc->i80_mode) {
int vbp = m->crtc_vtotal - m->crtc_vsync_end;
int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
@@ -277,16 +279,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_A8888;
val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_ERROR("Proper pixel format is not set\n");
- return;
}
- DRM_DEBUG_KMS("bpp = %u\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -329,7 +329,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
@@ -365,11 +365,11 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
if (!(ctx->out_type & IFTYPE_HDMI))
- val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
- | BIT_VAL(state->crtc.w * bpp, 13, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 27, 14)
+ | BIT_VAL(state->crtc.w * cpp, 13, 0);
else
- val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
- | BIT_VAL(state->crtc.w * bpp, 14, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 29, 15)
+ | BIT_VAL(state->crtc.w * cpp, 14, 0);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
decon_win_set_pixfmt(ctx, win, fb);
@@ -407,24 +407,19 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
static void decon_swreset(struct decon_context *ctx)
{
- unsigned int tries;
unsigned long flags;
+ u32 val;
+ int ret;
writel(0, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_STOP_STATUS)
- break;
- udelay(10);
- }
+ readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_STOP_STATUS, 12, 20000);
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
- break;
- udelay(10);
- }
+ ret = readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_SWRESET, 12, 20000);
- WARN(tries == 0, "failed to software reset DECON\n");
+ WARN(ret < 0, "failed to software reset DECON\n");
spin_lock_irqsave(&ctx->vblank_lock, flags);
ctx->frame_id = 0;
@@ -515,6 +510,22 @@ err:
clk_disable_unprepare(ctx->clks[i]);
}
+static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ ctx->irq = crtc->i80_mode ? ctx->irq_lcd_sys : ctx->irq_vsync;
+
+ if (ctx->irq)
+ return MODE_OK;
+
+ dev_info(ctx->dev, "Sink requires %s mode, but the appropriate interrupt is not provided.\n",
+ crtc->i80_mode ? "command" : "video");
+
+ return MODE_BAD;
+}
+
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
@@ -524,6 +535,7 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
+ .mode_valid = decon_mode_valid,
.atomic_flush = decon_atomic_flush,
};
@@ -674,19 +686,22 @@ static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
static int decon_conf_irq(struct decon_context *ctx, const char *name,
- irq_handler_t handler, unsigned long int flags, bool required)
+ irq_handler_t handler, unsigned long int flags)
{
struct platform_device *pdev = to_platform_device(ctx->dev);
int ret, irq = platform_get_irq_byname(pdev, name);
if (irq < 0) {
- if (irq == -EPROBE_DEFER)
+ switch (irq) {
+ case -EPROBE_DEFER:
return irq;
- if (required)
- dev_err(ctx->dev, "cannot get %s IRQ\n", name);
- else
- irq = 0;
- return irq;
+ case -ENODATA:
+ case -ENXIO:
+ return 0;
+ default:
+ dev_err(ctx->dev, "failed to get %s IRQ: %d\n", name, irq);
+ return irq;
+ }
}
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
@@ -714,11 +729,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
spin_lock_init(&ctx->vblank_lock);
- if (ctx->out_type & IFTYPE_HDMI) {
+ if (ctx->out_type & IFTYPE_HDMI)
ctx->first_win = 1;
- } else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
- ctx->out_type |= IFTYPE_I80;
- }
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
@@ -742,25 +754,23 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
return PTR_ERR(ctx->addr);
}
- if (ctx->out_type & IFTYPE_I80) {
- ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0, true);
- if (ret < 0)
- return ret;
- ctx->irq = ret;
+ ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_vsync = ret;
- ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
- IRQF_TRIGGER_RISING, false);
- if (ret < 0)
- return ret;
- if (ret) {
- ctx->te_irq = ret;
- ctx->out_type &= ~I80_HW_TRG;
- }
- } else {
- ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0, true);
- if (ret < 0)
+ ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_lcd_sys = ret;
+
+ ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
+ IRQF_TRIGGER_RISING);
+ if (ret < 0)
return ret;
- ctx->irq = ret;
+ if (ret) {
+ ctx->te_irq = ret;
+ ctx->out_type &= ~I80_HW_TRG;
}
if (ctx->out_type & I80_HW_TRG) {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3e88269fdc2e..615efcf7782a 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -309,19 +309,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRA8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCONx_BPPMODE_24BPP_xRGB;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
- DRM_DEBUG_KMS("bpp = %d\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -398,7 +393,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
@@ -418,7 +413,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (pitch / bpp) - fb->width;
+ padding = (pitch / cpp) - fb->width;
/* buffer size */
writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 385537b726a6..39629e7a80b9 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -155,7 +155,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder;
struct drm_device *drm_dev = data;
- int pipe, ret;
+ int ret;
/*
* Just like the probe function said, we don't need the
@@ -179,20 +179,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
dp->plat_data.encoder = encoder;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index edbd98ff293e..b0c0621fcdf7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -13,6 +13,7 @@
*/
#include <drm/drmP.h>
+
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index c37078fbe0ea..6ce0821590df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_encoder.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
@@ -83,7 +84,19 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
exynos_crtc->ops->atomic_flush(exynos_crtc);
}
+static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->mode_valid)
+ return exynos_crtc->ops->mode_valid(exynos_crtc, mode);
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+ .mode_valid = exynos_crtc_mode_valid,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
@@ -191,16 +204,30 @@ err_crtc:
return ERR_PTR(ret);
}
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev)
if (to_exynos_crtc(crtc)->type == out_type)
- return drm_crtc_index(crtc);
+ return to_exynos_crtc(crtc);
- return -EPERM;
+ return ERR_PTR(-EPERM);
+}
+
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type)
+{
+ struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(encoder->dev,
+ out_type);
+
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
+
+ return 0;
}
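exynos_drm_set_possible_crtcs() replaces the open-coded "1 << pipe" at each encoder bind site; drm_crtc_mask() is, in essence, one bit per CRTC indexed by registration order. A toy model of that mask computation (types are illustrative, not DRM's):

#include <stdint.h>
#include <stdio.h>

static uint32_t crtc_mask(unsigned int index)
{
	return 1u << index;	/* one bit per CRTC, by registration order */
}

int main(void)
{
	/* an encoder restricted to the CRTC registered at index 2 */
	uint32_t possible_crtcs = crtc_mask(2);

	printf("possible_crtcs = 0x%x\n", possible_crtcs);	/* 0x4 */
	return 0;
}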
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index ef58b64e3d2d..dec446109e6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,21 +15,25 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
+
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
- enum exynos_drm_output_type type,
+ enum exynos_drm_output_type out_type,
const struct exynos_drm_crtc_ops *ops,
void *context);
void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
struct exynos_drm_plane *exynos_plane);
-/* This function gets pipe value to crtc device matched with out_type. */
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+/* This function gets crtc device matched with out_type. */
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type);
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type);
+
/*
* This function calls the crtc device(manager)'s te_handler() callback
* to trigger to transfer video image at the tearing effect synchronization
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 76d80e5de521..66945e0dc57f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -202,19 +202,15 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dpi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index cab9e12d7846..b1f7299600f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -145,8 +145,6 @@ static struct drm_driver exynos_drm_driver = {
.gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
- .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -455,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
struct component_match *match;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a93de321706b..cf131c2aa23e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -91,6 +91,7 @@ struct exynos_drm_plane {
#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2)
+#define EXYNOS_DRM_PLANE_CAP_TILE (1 << 3)
/*
* Exynos DRM plane configuration structure.
@@ -117,6 +118,7 @@ struct exynos_drm_plane_config {
* @disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @mode_valid: specific driver callback for mode validation
* @atomic_check: validate state
* @atomic_begin: prepare device to receive an update
* @atomic_flush: mark the end of device update
@@ -132,6 +134,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
+ enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -162,6 +166,7 @@ struct exynos_drm_crtc {
const struct exynos_drm_crtc_ops *ops;
void *ctx;
struct exynos_drm_clk *pipe_clk;
+ bool i80_mode : 1;
};
static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 4ea7cc7cb3de..7904ffa9abfb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -254,7 +254,6 @@ struct exynos_dsi {
struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
- struct device_node *panel_node;
struct drm_panel *panel;
struct device *dev;
@@ -1329,12 +1328,13 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
return 0;
}
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
+ struct device *panel)
{
int ret;
int te_gpio_irq;
- dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0);
if (dsi->te_gpio == -ENOENT)
return 0;
@@ -1374,85 +1374,6 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
}
}
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
- dsi->panel_node = device->dev.of_node;
-
- /*
- * This is a temporary solution and should be made by more generic way.
- *
- * If attached panel device is for command mode one, dsi should register
- * TE interrupt handler.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- int ret = exynos_dsi_register_te_irq(dsi);
-
- if (ret)
- return ret;
- }
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- dsi->panel_node = NULL;
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1508,25 +1429,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
- struct exynos_dsi *dsi = connector_to_dsi(connector);
-
- if (!dsi->panel) {
- dsi->panel = of_drm_find_panel(dsi->panel_node);
- if (dsi->panel)
- drm_panel_attach(dsi->panel, &dsi->connector);
- } else if (!dsi->panel_node) {
- struct drm_encoder *encoder;
-
- encoder = platform_get_drvdata(to_platform_device(dsi->dev));
- exynos_dsi_disable(encoder);
- drm_panel_detach(dsi->panel);
- dsi->panel = NULL;
- }
-
- if (dsi->panel)
- return connector_status_connected;
-
- return connector_status_disconnected;
+ return connector->status;
}
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
@@ -1575,6 +1478,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return ret;
}
+ connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1611,6 +1515,105 @@ static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ /*
+ * This is a temporary solution and should be replaced with a more
+ * generic mechanism.
+ *
+ * If the attached panel is a command mode panel, the DSI host should
+ * register a TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ dsi->connector.status = connector_status_connected;
+ }
+ exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
+ !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ if (dsi->panel) {
+ exynos_dsi_disable(&dsi->encoder);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ dsi->connector.status = connector_status_disconnected;
+ }
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ exynos_dsi_unregister_te_irq(dsi);
+
+ return 0;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
+ ret = exynos_dsi_init(dsi);
+ if (ret)
+ return ret;
+ dsi->state |= DSIM_STATE_INITIALIZED;
+ }
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = exynos_dsi_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .transfer = exynos_dsi_host_transfer,
+};
+
static int exynos_dsi_of_read_u32(const struct device_node *np,
const char *propname, u32 *out_value)
{
@@ -1649,8 +1652,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
- if (!dsi->bridge_node)
- return -EINVAL;
return 0;
}
@@ -1664,20 +1665,15 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *bridge;
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dsi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
@@ -1685,9 +1681,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
return ret;
}
- bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge)
- drm_bridge_attach(encoder, bridge, NULL);
+ if (dsi->bridge_node) {
+ bridge = of_drm_find_bridge(dsi->bridge_node);
+ if (bridge)
+ drm_bridge_attach(encoder, bridge, NULL);
+ }
return mipi_dsi_host_register(&dsi->dsi_host);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index ed1a648d518c..8208df56a88f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int height = (i == 0) ? mode_cmd->height :
+ DIV_ROUND_UP(mode_cmd->height, info->vsub);
+ unsigned long size = height * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i];
+
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
exynos_gem[i] = to_exynos_gem(obj);
+
+ if (size > exynos_gem[i]->size) {
+ i++;
+ ret = -EINVAL;
+ goto err;
+ }
}
fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
@@ -213,4 +225,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
+
+ dev->mode_config.allow_fb_modifiers = true;
}
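The new loop in exynos_user_fb_create() rejects GEM buffers that are too small for the requested framebuffer layout: plane 0 is full height, while subsampled chroma planes only need DIV_ROUND_UP(height, vsub) lines. A standalone rendition of that size computation (the struct loosely mirrors drm_mode_fb_cmd2; the values are illustrative):

#include <stdio.h>

struct fb_cmd {
	unsigned int width, height;
	unsigned int pitches[4], offsets[4];
};

static unsigned long plane_size(const struct fb_cmd *cmd, int plane,
				unsigned int vsub)
{
	unsigned int height = plane ?
		(cmd->height + vsub - 1) / vsub : cmd->height; /* DIV_ROUND_UP */

	return (unsigned long)height * cmd->pitches[plane] + cmd->offsets[plane];
}

int main(void)
{
	/* NV12-like layout: full-res luma plane, half-height chroma plane */
	struct fb_cmd cmd = { .width = 1920, .height = 1080,
			      .pitches = { 1920, 1920 }, .offsets = { 0, 0 } };

	printf("plane0 needs %lu bytes, plane1 needs %lu bytes\n",
	       plane_size(&cmd, 0, 2), plane_size(&cmd, 1, 2));
	return 0;
}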
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 60f93cad6643..d42ae2bc3e56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -583,18 +583,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCON0_BPPMODE_24BPP_888;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
/*
@@ -718,13 +712,13 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
- offset = state->src.x * bpp;
+ offset = state->src.x * cpp;
offset += state->src.y * pitch;
/* buffer start address */
@@ -743,8 +737,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
state->crtc.w, state->crtc.h);
/* buffer size */
- buf_offsize = pitch - (state->crtc.w * bpp);
- line_size = state->crtc.w * bpp;
+ buf_offsize = pitch - (state->crtc.w * cpp);
+ line_size = state->crtc.w * cpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index c23479be4850..077de014d610 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -286,8 +286,8 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
{
struct drm_exynos_gem_map *args = data;
- return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
- &args->offset);
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
@@ -422,32 +422,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- int ret = 0;
-
- /*
- * get offset of memory allocated for drm framebuffer.
- * - this callback would be called by user application
- * with DRM_IOCTL_MODE_MAP_DUMB command.
- */
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return -EINVAL;
- }
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
- DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
-
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
int exynos_drm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 85457255fcd1..e86d1a9518c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -110,11 +110,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* map memory region for drm framebuffer to user space. */
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
-
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e45720543a45..ba4a32b132ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,9 +21,12 @@
#include <linux/component.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "exynos_drm_drv.h"
+
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
#define MIC0_RGB_MUX (1 << 0)
@@ -85,12 +88,6 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-enum {
- ENDPOINT_DECON_NODE,
- ENDPOINT_DSI_NODE,
- NUM_ENDPOINTS
-};
-
static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
@@ -229,36 +226,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
writel(reg, mic->reg + MIC_OP);
}
-static int parse_dt(struct exynos_mic *mic)
-{
- int ret = 0, i, j;
- struct device_node *remote_node;
- struct device_node *nodes[3];
-
- /*
- * The order of endpoints does matter.
- * The first node must be for decon and the second one must be for dsi.
- */
- for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
- remote_node = of_graph_get_remote_node(mic->dev->of_node, i, 0);
- if (!remote_node) {
- ret = -EPIPE;
- goto exit;
- }
- nodes[j++] = remote_node;
-
- if (i == ENDPOINT_DECON_NODE &&
- of_get_child_by_name(remote_node, "i80-if-timings"))
- mic->i80_mode = 1;
- }
-
-exit:
- while (--j > -1)
- of_node_put(nodes[j]);
-
- return ret;
-}
-
static void mic_disable(struct drm_bridge *bridge) { }
static void mic_post_disable(struct drm_bridge *bridge)
@@ -286,6 +253,7 @@ static void mic_mode_set(struct drm_bridge *bridge,
mutex_lock(&mic_mutex);
drm_display_mode_to_videomode(mode, &mic->vm);
+ mic->i80_mode = to_exynos_crtc(bridge->encoder->crtc)->i80_mode;
mutex_unlock(&mic_mutex);
}
@@ -340,16 +308,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
- int ret;
- mic->bridge.funcs = &mic_bridge_funcs;
- mic->bridge.of_node = dev->of_node;
mic->bridge.driver_private = mic;
- ret = drm_bridge_add(&mic->bridge);
- if (ret)
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
+ return 0;
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -365,8 +327,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
already_disabled:
mutex_unlock(&mic_mutex);
-
- drm_bridge_remove(&mic->bridge);
}
static const struct component_ops exynos_mic_component_ops = {
@@ -425,10 +385,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->dev = dev;
- ret = parse_dt(mic);
- if (ret)
- goto err;
-
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
DRM_ERROR("mic: Failed to get mem region for MIC\n");
@@ -461,6 +417,15 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
+ mic->bridge.funcs = &mic_bridge_funcs;
+ mic->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&mic->bridge);
+ if (ret) {
+ DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+ return ret;
+ }
+
pm_runtime_enable(dev);
ret = component_add(dev, &exynos_mic_component_ops);
@@ -479,8 +444,13 @@ err:
static int exynos_mic_remove(struct platform_device *pdev)
{
+ struct exynos_mic *mic = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
+
+ drm_bridge_remove(&mic->bridge);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8de74009dee4..d2a90dae5c71 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -179,6 +179,29 @@ static struct drm_plane_funcs exynos_plane_funcs = {
};
static int
+exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
+ struct exynos_drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->base.fb;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+ if (!(config->capabilities & EXYNOS_DRM_PLANE_CAP_TILE))
+ return -ENOTSUPP;
+ break;
+
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+
+ default:
+ DRM_ERROR("unsupported pixel format modifier");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
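A compact model of the modifier gate added above: tiled scanout is only allowed on planes that advertise the TILE capability, linear always passes, and unknown modifiers are rejected. The modifier codes below are placeholders, not the real DRM fourcc modifier values:

#include <stdio.h>

#define ENOTSUPP 524			/* kernel-internal "not supported" */

#define CAP_TILE (1 << 3)		/* mirrors EXYNOS_DRM_PLANE_CAP_TILE */

#define MOD_LINEAR             0	/* placeholder modifier codes */
#define MOD_SAMSUNG_64_32_TILE 1

static int check_format(unsigned int capabilities, unsigned long modifier)
{
	switch (modifier) {
	case MOD_SAMSUNG_64_32_TILE:
		return (capabilities & CAP_TILE) ? 0 : -ENOTSUPP;
	case MOD_LINEAR:
		return 0;
	default:
		return -ENOTSUPP;	/* unknown modifier */
	}
}

int main(void)
{
	printf("tiled on plain plane: %d\n",
	       check_format(0, MOD_SAMSUNG_64_32_TILE));
	printf("tiled on video plane: %d\n",
	       check_format(CAP_TILE, MOD_SAMSUNG_64_32_TILE));
	return 0;
}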
+
+static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
@@ -222,6 +245,10 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
/* translate state into exynos_state */
exynos_plane_mode_set(exynos_state);
+ ret = exynos_drm_plane_check_format(exynos_plane->config, exynos_state);
+ if (ret)
+ return ret;
+
ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 9186a654c3b5..53e03f8af3d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -381,7 +381,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_plane *exynos_plane;
struct exynos_drm_plane_config plane_config = { 0 };
unsigned int i;
- int pipe, ret;
+ int ret;
ctx->drm_dev = drm_dev;
@@ -406,20 +406,15 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_VIDI);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_VIDI);
+ if (ret < 0)
+ return ret;
+
ret = vidi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 0e2a472c3021..214fa5e51963 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1500,8 +1500,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
cancel_delayed_work(&hdata->hotplug_work);
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
-
- hdmiphy_disable(hdata);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1675,7 +1673,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
return hdmi_bridge_init(hdata);
}
-static struct of_device_id hdmi_match_types[] = {
+static const struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos4210-hdmi",
.data = &exynos4210_hdmi_driver_data,
@@ -1699,32 +1697,25 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_encoder *encoder = &hdata->encoder;
- struct exynos_drm_crtc *exynos_crtc;
- struct drm_crtc *crtc;
- int ret, pipe;
+ struct exynos_drm_crtc *crtc;
+ int ret;
hdata->drm_dev = drm_dev;
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_HDMI);
- if (pipe < 0)
- return pipe;
-
hdata->phy_clk.enable = hdmiphy_clk_enable;
- crtc = drm_crtc_from_index(drm_dev, pipe);
- exynos_crtc = to_exynos_crtc(crtc);
- exynos_crtc->pipe_clk = &hdata->phy_clk;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (ret < 0)
+ return ret;
+
+ crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ crtc->pipe_clk = &hdata->phy_clk;
+
ret = hdmi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
@@ -1933,8 +1924,7 @@ static int hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_hdmi_suspend(struct device *dev)
+static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1943,7 +1933,7 @@ static int exynos_hdmi_suspend(struct device *dev)
return 0;
}
-static int exynos_hdmi_resume(struct device *dev)
+static int __maybe_unused exynos_hdmi_resume(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
@@ -1954,7 +1944,6 @@ static int exynos_hdmi_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6bed4f3ffcd6..002755415e00 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -148,7 +148,8 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
.pixel_formats = vp_formats,
.num_pixel_formats = ARRAY_SIZE(vp_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
- EXYNOS_DRM_PLANE_CAP_ZPOS,
+ EXYNOS_DRM_PLANE_CAP_ZPOS |
+ EXYNOS_DRM_PLANE_CAP_TILE,
},
};
@@ -483,29 +484,18 @@ static void vp_video_buffer(struct mixer_context *ctx,
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
- bool tiled_mode = false;
- bool crcb_mode = false;
+ bool is_tiled, is_nv21;
u32 val;
- switch (fb->format->format) {
- case DRM_FORMAT_NV12:
- crcb_mode = false;
- break;
- case DRM_FORMAT_NV21:
- crcb_mode = true;
- break;
- default:
- DRM_ERROR("pixel format for vp is wrong [%d].\n",
- fb->format->format);
- return;
- }
+ is_nv21 = (fb->format->format == DRM_FORMAT_NV21);
+ is_tiled = (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE);
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
- if (tiled_mode) {
+ if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
@@ -525,14 +515,14 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
- val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
- val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
- /* chroma height has to reduced by 2 to avoid chroma distorions */
+ /* chroma plane for NV12/NV21 is half the height of the luma plane */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
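The height/2 above follows from the NV12/NV21 memory layout the video processor consumes: a full-height luma plane followed by one interleaved chroma plane covering half the lines (4:2:0 subsampling). A quick worked example with illustrative dimensions:

#include <stdio.h>

int main(void)
{
	unsigned int pitch = 1920, height = 1080;
	unsigned long luma_size   = (unsigned long)pitch * height;
	unsigned long chroma_size = (unsigned long)pitch * (height / 2);

	printf("luma: %lu bytes, chroma: %lu bytes, total: %lu\n",
	       luma_size, chroma_size, luma_size + chroma_size);
	return 0;
}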
@@ -594,7 +584,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -616,12 +606,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ default:
fmt = MXR_FORMAT_ARGB8888;
break;
-
- default:
- DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
- return;
}
/* ratio is already checked by common plane code */
@@ -631,12 +618,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
dst_x_offset = state->crtc.x;
dst_y_offset = state->crtc.y;
- /* converting dma address base and source offset */
+ /* translate dma address base so that the source image offset is zero */
dma_addr = exynos_drm_fb_dma_addr(fb, 0)
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
- src_x_offset = 0;
- src_y_offset = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
@@ -667,11 +652,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
- /* setup offsets in source image */
- val = MXR_GRP_SXY_SX(src_x_offset);
- val |= MXR_GRP_SXY_SY(src_y_offset);
- mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
-
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
@@ -748,6 +728,10 @@ static void mixer_win_reset(struct mixer_context *ctx)
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+ /* set all source image offsets to zero */
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0);
+
spin_unlock_irqrestore(&res->reg_slock, flags);
}
@@ -1094,28 +1078,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.atomic_check = mixer_atomic_check,
};
-static struct mixer_drv_data exynos5420_mxr_drv_data = {
+static const struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos5250_mxr_drv_data = {
+static const struct mixer_drv_data exynos5250_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos4212_mxr_drv_data = {
+static const struct mixer_drv_data exynos4212_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
-static struct mixer_drv_data exynos4210_mxr_drv_data = {
+static const struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
.has_sclk = 1,
};
-static struct of_device_id mixer_match_types[] = {
+static const struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
.data = &exynos4210_mxr_drv_data,
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 7da061aab729..131239759a75 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -48,36 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
}
/**
- * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
- * @file: our drm client file
- * @dev: drm device
- * @handle: GEM handle to the object (from dumb_create)
- *
- * Do the necessary setup to allow the mapping of the frame buffer
- * into user memory. We don't have to do much here at the moment.
- */
-int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- int ret = 0;
- struct drm_gem_object *obj;
-
- /* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- /* Make it mmapable */
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-out:
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
-/**
* psb_gem_create - create a mappable object
* @file: the DRM file of the client
* @dev: our device
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 1616af209bfc..c50534c923df 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -520,7 +520,7 @@ static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
{
unsigned long flags;
- struct drm_device *dev = sender->dev;
+ struct drm_device *dev;
int i;
u32 gen_data_reg;
int retry = MDFLD_DSI_READ_MAX_COUNT;
@@ -530,6 +530,8 @@ static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
return -EINVAL;
}
+ dev = sender->dev;
+
/**
* do reading.
* 0) send out generic read request
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 747c06b227c5..37a3be71acd9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -494,8 +494,6 @@ static struct drm_driver driver = {
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
- .dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83667087d6e5..821497dbd3fc 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -750,8 +750,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index 9740eed9231a..b92595c477ef 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -157,7 +157,7 @@ out_unpin_bo:
out_unreserve_ttm_bo:
ttm_bo_unreserve(&bo->bo);
out_unref_gem:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -172,7 +172,7 @@ static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
drm_fb_helper_fini(fbh);
if (gfb)
- drm_framebuffer_unreference(&gfb->fb);
+ drm_framebuffer_put(&gfb->fb);
}
static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index ac457c779caa..3518167a7dc4 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -444,7 +444,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret) {
DRM_ERROR("failed to unreference GEM object: %d\n", ret);
return ret;
@@ -479,7 +479,7 @@ int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
bo = gem_to_hibmc_bo(obj);
*offset = hibmc_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -487,7 +487,7 @@ static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
- drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+ drm_gem_object_put_unlocked(hibmc_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(hibmc_fb);
}
@@ -543,7 +543,7 @@ hibmc_user_framebuffer_create(struct drm_device *dev,
hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
if (IS_ERR(hibmc_fb)) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR((long)hibmc_fb);
}
return &hibmc_fb->fb;
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f77dcfaade6c..b4c7af3ab6ae 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -603,6 +603,72 @@ static void dsi_encoder_enable(struct drm_encoder *encoder)
dsi->enable = true;
}
+static enum drm_mode_status dsi_encoder_phy_mode_valid(
+ struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct mipi_phy_params phy;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ u32 req_kHz, act_kHz, lane_byte_clk_kHz;
+
+ /* Calculate the lane byte clk using the adjusted mode clk */
+ memset(&phy, 0, sizeof(phy));
+ req_kHz = mode->clock * bpp / dsi->lanes;
+ act_kHz = dsi_calc_phy_rate(req_kHz, &phy);
+ lane_byte_clk_kHz = act_kHz / 8;
+
+ DRM_DEBUG_DRIVER("Checking mode %ix%i-%i@%i clock: %i...",
+ mode->hdisplay, mode->vdisplay, bpp,
+ drm_mode_vrefresh(mode), mode->clock);
+
+ /*
+ * Make sure the adjusted mode clock and the lane byte clk
+ * have a common denominator base frequency
+ */
+ if (mode->clock/dsi->lanes == lane_byte_clk_kHz/3) {
+ DRM_DEBUG_DRIVER("OK!\n");
+ return MODE_OK;
+ }
+
+ DRM_DEBUG_DRIVER("BAD!\n");
+ return MODE_BAD;
+}
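The arithmetic in dsi_encoder_phy_mode_valid() reduces to requiring the PHY to hit the requested rate: req = clock * bpp / lanes, the lane byte clock is act / 8, and the mode passes iff clock/lanes == lane_byte_clk/3. A standalone check, with dsi_calc_phy_rate() modelled as an identity stub (an assumption for illustration; the real PLL may round):

#include <stdio.h>

static unsigned int dsi_calc_phy_rate_stub(unsigned int req_khz)
{
	return req_khz;		/* pretend the PLL hits the request exactly */
}

static int phy_mode_valid(unsigned int clock_khz, unsigned int bpp,
			  unsigned int lanes)
{
	unsigned int req_khz = clock_khz * bpp / lanes;
	unsigned int act_khz = dsi_calc_phy_rate_stub(req_khz);
	unsigned int lane_byte_clk_khz = act_khz / 8;

	return clock_khz / lanes == lane_byte_clk_khz / 3;
}

int main(void)
{
	/* 148500 kHz pixel clock, 24 bpp over 4 lanes */
	printf("mode %s\n", phy_mode_valid(148500, 24, 4) ? "OK" : "BAD");
	return 0;
}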
+
+static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+
+{
+ const struct drm_crtc_helper_funcs *crtc_funcs = NULL;
+ struct drm_crtc *crtc = NULL;
+ struct drm_display_mode adj_mode;
+ enum drm_mode_status ret;
+
+ /*
+ * The crtc might adjust the mode, so go through the
+ * possible crtcs (technically just one) and call
+ * mode_fixup to figure out the adjusted mode before we
+ * validate it.
+ */
+ drm_for_each_crtc(crtc, encoder->dev) {
+ /*
+ * reset adj_mode to the mode value each time,
+ * so we don't adjust the mode twice
+ */
+ drm_mode_copy(&adj_mode, mode);
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs && crtc_funcs->mode_fixup)
+ if (!crtc_funcs->mode_fixup(crtc, mode, &adj_mode))
+ return MODE_BAD;
+
+ ret = dsi_encoder_phy_mode_valid(encoder, &adj_mode);
+ if (ret != MODE_OK)
+ return ret;
+ }
+ return MODE_OK;
+}
+
static void dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -622,6 +688,7 @@ static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.atomic_check = dsi_encoder_atomic_check,
+ .mode_valid = dsi_encoder_mode_valid,
.mode_set = dsi_encoder_mode_set,
.enable = dsi_encoder_enable,
.disable = dsi_encoder_disable
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 39f7d15673ed..9823477b1855 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -178,6 +178,19 @@ static void ade_init(struct ade_hw_ctx *ctx)
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
}
+static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ adjusted_mode->clock =
+ clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
+ return true;
+}
+
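ade_crtc_mode_fixup() snaps the requested pixel clock to the nearest rate the clock tree can actually produce, so downstream validation sees an achievable clock. A sketch with clk_round_rate() replaced by a stub that rounds down to a 25 kHz grid (the real granularity is SoC specific):

#include <stdio.h>

/* stand-in for clk_round_rate(): rounds down to a 25 kHz grid */
static long clk_round_rate_stub(long rate_hz)
{
	return rate_hz - (rate_hz % 25000);
}

int main(void)
{
	int mode_clock_khz = 148351;			/* requested, in kHz */
	int adj_khz = clk_round_rate_stub(mode_clock_khz * 1000L) / 1000;

	printf("requested %d kHz, fixed up to %d kHz\n",
	       mode_clock_khz, adj_khz);
	return 0;
}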
static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -555,6 +568,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
+ .mode_fixup = ade_crtc_mode_fixup,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 79fcce76f2ad..e27352ca26c4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -157,8 +157,6 @@ static struct drm_driver kirin_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 24cc4b012e93..3c318439a659 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
- bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
- hrtimer_cancel(&irq->vblank_timer.timer);
-
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- have_enabled_pipe =
- pipe_is_enabled(vgpu, pipe);
- if (have_enabled_pipe)
- break;
+ if (pipe_is_enabled(vgpu, pipe))
+ goto out;
}
}
- if (have_enabled_pipe)
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
+ /* all the pipes are disabled */
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ return;
+
+out:
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
}
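The rework above stops unconditionally cancelling and conditionally restarting the hrtimer: it scans every pipe of every vGPU, jumps out on the first enabled pipe to (re)start the timer, and only cancels when nothing is enabled. The control flow, with toy data structures:

#include <stdbool.h>
#include <stdio.h>

#define NR_VGPUS 2
#define NR_PIPES 3

static bool pipe_enabled[NR_VGPUS][NR_PIPES];

static void check_vblank_timer(void)
{
	int id, pipe;

	for (id = 0; id < NR_VGPUS; id++)
		for (pipe = 0; pipe < NR_PIPES; pipe++)
			if (pipe_enabled[id][pipe])
				goto out;

	puts("all pipes disabled: cancel vblank timer");
	return;
out:
	puts("at least one pipe enabled: (re)start vblank timer");
}

int main(void)
{
	check_vblank_timer();
	pipe_enabled[1][0] = true;
	check_vblank_timer();
	return 0;
}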
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index df11f69edc05..91b4300f3b39 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_execlist *execlist =
- &vgpu->execlist[workload->ring_id];
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
int ret;
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
- if (workload->status || vgpu->resetting)
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the hardware GPU
+ * hit a hang or something went wrong in i915/GVT, and GVT
+ * won't inject a context switch interrupt into the guest.
+ * To the guest this error is effectively a vGPU hang, so
+ * we should emulate one. If there are pending workloads
+ * already submitted by the guest, clean them up the way
+ * the hardware GPU would.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the hardware GPU and will
+ * be cleaned up later during the reset, so doing the
+ * cleanup here as well has no adverse effect.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
goto out;
+ }
- if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i;
+ int i, j;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
+ for (i = 0; i < num; i++, block++) {
+ for (j = 0; j < block->size; j += 4)
+ *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+ I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+ block->offset) + j));
+ }
+
memcpy(gvt->firmware.mmio, p, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ea736717e051..44b719eda8c4 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
bool active;
bool pv_notified;
bool failsafe;
- bool resetting;
+ unsigned int resetting_eng;
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -196,6 +196,15 @@ struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+ unsigned int device;
+ i915_reg_t offset;
+ unsigned int size;
+ gvt_mmio_func read;
+ gvt_mmio_func write;
+};
+
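Moving the block table into struct intel_gvt_mmio lets both the register handler lookup and the firmware snapshot loop iterate the same ranges. The lookup (find_mmio_block(), later in this patch) is a linear scan: match the device mask first, then the offset window. A simplified standalone version with illustrative ranges:

#include <stddef.h>
#include <stdio.h>

struct mmio_block {			/* simplified gvt_mmio_block */
	unsigned int device;		/* device-type mask */
	unsigned int offset;		/* start of the range */
	unsigned int size;		/* range length in bytes */
};

static const struct mmio_block blocks[] = {
	{ 0x1, 0x3000,  0x1000 },
	{ 0x3, 0x78000, 0x1000 },	/* illustrative ranges */
};

static const struct mmio_block *find_block(unsigned int device,
					   unsigned int offset)
{
	size_t i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		if (!(device & blocks[i].device))
			continue;
		if (offset >= blocks[i].offset &&
		    offset < blocks[i].offset + blocks[i].size)
			return &blocks[i];
	}
	return NULL;
}

int main(void)
{
	printf("hit: %p\n", (const void *)find_block(0x2, 0x78004));
	printf("miss: %p\n", (const void *)find_block(0x2, 0x3004));
	return 0;
}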
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
@@ -215,6 +224,9 @@ struct intel_gvt_mmio {
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+ struct gvt_mmio_block *mmio_block;
+ unsigned int num_mmio_block;
+
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 022dbc4a15d6..2294466dd415 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2872,31 +2872,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
return 0;
}
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
- unsigned int device;
- i915_reg_t offset;
- unsigned int size;
- gvt_mmio_func read;
- gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
- pvinfo_mmio_read, pvinfo_mmio_write},
- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
unsigned long device = intel_gvt_get_device_type(gvt);
- struct gvt_mmio_block *block = gvt_mmio_blocks;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
int i;
- for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+ for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2927,6 +2911,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+ {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+ {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+ pvinfo_mmio_read, pvinfo_mmio_write},
+ {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
* @gvt: GVT device
@@ -2966,6 +2961,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
+ gvt->mmio.mmio_block = mmio_blocks;
+ gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
@@ -3045,7 +3043,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 4))
+ if (WARN_ON(bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 025aba8a72e0..391800d2067b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -479,7 +479,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
i915_gem_request_put(fetch_and_zero(&workload->req));
- if (!workload->status && !vgpu->resetting) {
+ if (!workload->status && !(vgpu->resetting_eng &
+ ENGINE_MASK(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 5896ead8529e..02c61a1ad56a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -481,11 +481,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
- vgpu->resetting = true;
+
+ vgpu->resetting_eng = resetting_eng;
intel_vgpu_stop_schedule(vgpu);
/*
@@ -498,7 +500,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
- intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+ intel_vgpu_reset_execlist(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
@@ -521,7 +523,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
}
}
- vgpu->resetting = false;
+ vgpu->resetting_eng = 0;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}
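Turning the resetting flag into resetting_eng, a per-engine bitmask, lets a completing workload ask whether its own ring is mid-reset instead of stalling on a global boolean; a device-model-level reset (dmlr) simply selects ALL_ENGINES. In miniature:

#include <stdio.h>

#define ENGINE_MASK(id)	(1u << (id))	/* matches the i915 macro in spirit */
#define ALL_ENGINES	0xffffffffu	/* dmlr resets everything */

int main(void)
{
	unsigned int resetting_eng = ENGINE_MASK(0) | ENGINE_MASK(2);
	int ring_id = 1;

	/* a workload on ring 1 is unaffected by resets on rings 0 and 2 */
	printf("ring %d resetting: %d\n", ring_id,
	       !!(resetting_eng & ENGINE_MASK(ring_id)));
	return 0;
}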
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f0cb22cc0dd6..8ba932b22f7c 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1073,7 +1073,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
goto unpin_src;
}
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
if (IS_ERR(dst))
goto unpin_dst;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 43100229613c..9f45cfeae775 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1891,9 +1891,15 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
/*
* Everything depends on having the GTT running, so we need to start
- * there. Fortunately we don't need to do this unless we reset the
- * chip at a PCI level.
- *
+ * there.
+ */
+ ret = i915_ggtt_enable_hw(i915);
+ if (ret) {
+ DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+ goto error;
+ }
+
+ /*
* Next we need to restore the context, but we don't use those
* yet either...
*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 60267e375e88..571c4e27a574 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3479,6 +3479,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
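
The force variants borrow bit 31 so existing callers keep working unchanged: the low bits still select WB vs. WC, while the override bit asks pin_map to discard a cached mapping of the other type rather than hand it back. A minimal decode sketch with assumed local names:

	enum i915_map_type type = I915_MAP_FORCE_WB;
	bool override = type & I915_MAP_OVERRIDE;	/* rebuild a stale mapping */

	type &= ~I915_MAP_OVERRIDE;			/* plain I915_MAP_WB again */
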
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9e8e0d6e97b..1abde927fe3e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -695,12 +695,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
- if (intel_runtime_pm_get_if_in_use(dev_priv)) {
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
- }
+ intel_runtime_pm_get(dev_priv);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
}
intel_fb_obj_flush(obj,
@@ -2213,7 +2212,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
@@ -2553,6 +2552,9 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
GEM_BUG_ON(i != n_pages);
switch (type) {
+ default:
+ MISSING_CASE(type);
+ /* fallthrough to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
@@ -2583,7 +2585,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (ret)
return ERR_PTR(ret);
- pinned = true;
+ pinned = !(type & I915_MAP_OVERRIDE);
+ type &= ~I915_MAP_OVERRIDE;
+
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
ret = ____i915_gem_object_get_pages(obj);
@@ -3258,7 +3262,13 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
- if (!i915_vma_is_ggtt(vma))
+ GEM_BUG_ON(vma->obj != obj);
+
+ /* We allow the process to have multiple handles to the same
+ * vma, in the same fd namespace, by virtue of flink/open.
+ */
+ GEM_BUG_ON(!vma->open_count);
+ if (!--vma->open_count && !i915_vma_is_ggtt(vma))
i915_vma_close(vma);
list_del(&lut->obj_link);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3d74f3a27c13..50d5e24f91a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -285,7 +285,7 @@ static int eb_create(struct i915_execbuffer *eb)
* direct lookup.
*/
do {
- unsigned int flags;
+ gfp_t flags;
/* While we can still reduce the allocation size, don't
* raise a warning and allow the allocation to fail.
@@ -720,6 +720,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
+ vma->open_count++;
list_add(&lut->obj_link, &obj->lut_list);
list_add(&lut->ctx_link, &eb->ctx->handles_list);
lut->ctx = eb->ctx;
@@ -1070,7 +1071,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
return PTR_ERR(obj);
cmd = i915_gem_object_pin_map(obj,
- cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
+ cache->has_llc ?
+ I915_MAP_FORCE_WB :
+ I915_MAP_FORCE_WC);
i915_gem_object_unpin_pages(obj);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
@@ -1526,7 +1529,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
min_t(u64, BIT_ULL(31), size - copied);
if (__copy_from_user((char *)relocs + copied,
- (char *)urelocs + copied,
+ (char __user *)urelocs + copied,
len)) {
kvfree(relocs);
err = -EFAULT;
@@ -2129,9 +2132,7 @@ await_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_WAIT))
continue;
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&syncobj->fence);
- rcu_read_unlock();
+ fence = drm_syncobj_fence_get(syncobj);
if (!fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d60f38adc4c4..933c4ea127ce 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2754,10 +2754,10 @@ static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b24a83d43559..6fd5c57e21f6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -38,7 +38,7 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
);
TRACE_EVENT(intel_pch_fifo_underrun,
- TP_PROTO(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder),
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
TP_ARGS(dev_priv, pch_transcoder),
TP_STRUCT__entry(
@@ -48,7 +48,7 @@ TRACE_EVENT(intel_pch_fifo_underrun,
),
TP_fast_assign(
- enum pipe pipe = (enum pipe)pch_transcoder;
+ enum pipe pipe = pch_transcoder;
__entry->pipe = pipe;
__entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
__entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 1fd61e88cfd0..e811067c7724 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -59,6 +59,12 @@ struct i915_vma {
u32 fence_size;
u32 fence_alignment;
+ /**
+ * Count of the number of times this vma has been opened by different
+ * handles (but same file) for execbuf, i.e. the number of aliases
	 * that exist in the ctx->handles_vma LUT for this vma.
+ */
+ unsigned int open_count;
unsigned int flags;
/**
* How many users have pinned this object in GTT space. The following
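
Because flink/open can give one file several handles to the same object, several context-LUT entries may alias one vma; open_count makes close drop the vma only when the last alias goes away. The pairing, condensed from the execbuffer and close hunks above:

	vma->open_count++;				/* eb_lookup_vmas() */

	GEM_BUG_ON(!vma->open_count);			/* i915_gem_close_object() */
	if (!--vma->open_count && !i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
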
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 8e4e829682b9..ff9ecd211abb 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -107,7 +107,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
}
}
-void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
{
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0e93ec201fe3..f17275519484 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2288,17 +2288,13 @@ void intel_add_fb_offsets(int *x, int *y,
}
}
-/*
- * Input tile dimensions and pitch must already be
- * rotated to match x and y, and in pixel units.
- */
-static u32 _intel_adjust_tile_offset(int *x, int *y,
- unsigned int tile_width,
- unsigned int tile_height,
- unsigned int tile_size,
- unsigned int pitch_tiles,
- u32 old_offset,
- u32 new_offset)
+static u32 __intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
{
unsigned int pitch_pixels = pitch_tiles * tile_width;
unsigned int tiles;
@@ -2319,18 +2315,13 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
-/*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- */
-static u32 intel_adjust_tile_offset(int *x, int *y,
- const struct intel_plane_state *state, int plane,
- u32 old_offset, u32 new_offset)
+static u32 _intel_adjust_tile_offset(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane,
+ unsigned int rotation,
+ u32 old_offset, u32 new_offset)
{
- const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
- const struct drm_framebuffer *fb = state->base.fb;
+ const struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int cpp = fb->format->cpp[plane];
- unsigned int rotation = state->base.rotation;
unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
WARN_ON(new_offset > old_offset);
@@ -2349,9 +2340,9 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
pitch_tiles = pitch / (tile_width * cpp);
}
- _intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- old_offset, new_offset);
+ __intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ old_offset, new_offset);
} else {
old_offset += *y * pitch + *x * cpp;
@@ -2363,6 +2354,19 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
}
/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ const struct intel_plane_state *state, int plane,
+ u32 old_offset, u32 new_offset)
+{
+ return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
+ state->base.rotation,
+ old_offset, new_offset);
+}
+
+/*
* Computes the linear offset to the base tile and adjusts
* x, y. bytes per pixel is assumed to be a power-of-two.
*
@@ -2413,9 +2417,9 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
offset = (tile_rows * pitch_tiles + tiles) * tile_size;
offset_aligned = offset & ~alignment;
- _intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- offset, offset_aligned);
+ __intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
} else {
offset = *y * pitch + *x * cpp;
offset_aligned = offset & ~alignment;
@@ -2447,16 +2451,24 @@ u32 intel_compute_tile_offset(int *x, int *y,
rotation, alignment);
}
-/* Convert the fb->offset[] linear offset into x/y offsets */
-static void intel_fb_offset_to_xy(int *x, int *y,
- const struct drm_framebuffer *fb, int plane)
+/* Convert the fb->offset[] into x/y offsets */
+static int intel_fb_offset_to_xy(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane)
{
- unsigned int cpp = fb->format->cpp[plane];
- unsigned int pitch = fb->pitches[plane];
- u32 linear_offset = fb->offsets[plane];
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
+ fb->offsets[plane] % intel_tile_size(dev_priv))
+ return -EINVAL;
- *y = linear_offset / pitch;
- *x = linear_offset % pitch / cpp;
+ *x = 0;
+ *y = 0;
+
+ _intel_adjust_tile_offset(x, y,
+ fb, plane, DRM_MODE_ROTATE_0,
+ fb->offsets[plane], 0);
+
+ return 0;
}
static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
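
For linear framebuffers the old arithmetic still holds (y = offset / pitch, x = offset % pitch / cpp, which is what walking the tile grid degenerates to); for tiled framebuffers the plane offset must now be tile-size aligned or the conversion fails. A small worked sketch with assumed numbers:

	/* linear: cpp = 4, pitch = 8192 bytes, offsets[plane] = 16400 */
	/*   y = 16400 / 8192 = 2 rows, x = (16400 % 8192) / 4 = 4 pixels */

	/* tiled: offsets[plane] must be a multiple of the 4096-byte tile,
	 *   otherwise intel_fb_offset_to_xy() now returns -EINVAL */
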
@@ -2523,12 +2535,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
unsigned int cpp, size;
u32 offset;
int x, y;
+ int ret;
cpp = fb->format->cpp[i];
width = drm_framebuffer_plane_width(fb->width, fb, i);
height = drm_framebuffer_plane_height(fb->height, fb, i);
- intel_fb_offset_to_xy(&x, &y, fb, i);
+ ret = intel_fb_offset_to_xy(&x, &y, fb, i);
+ if (ret) {
+ DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
+ return ret;
+ }
if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
@@ -2539,11 +2557,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int ccs_x, ccs_y;
intel_tile_dims(fb, i, &tile_width, &tile_height);
+ tile_width *= hsub;
+ tile_height *= vsub;
- ccs_x = (x * hsub) % (tile_width * hsub);
- ccs_y = (y * vsub) % (tile_height * vsub);
- main_x = intel_fb->normal[0].x % (tile_width * hsub);
- main_y = intel_fb->normal[0].y % (tile_height * vsub);
+ ccs_x = (x * hsub) % tile_width;
+ ccs_y = (y * vsub) % tile_height;
+ main_x = intel_fb->normal[0].x % tile_width;
+ main_y = intel_fb->normal[0].y % tile_height;
/*
* CCS doesn't have its own x/y offset register, so the intra CCS tile
@@ -2569,7 +2589,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* fb layout agrees with the fence layout. We already check that the
* fb stride matches the fence stride elsewhere.
*/
- if (i915_gem_object_is_tiled(intel_fb->obj) &&
+ if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
(x + width) * cpp > fb->pitches[i]) {
DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
i, fb->offsets[i]);
@@ -2632,10 +2652,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* We only keep the x/y offsets, so push all of the
* gtt offset into the x/y offsets.
*/
- _intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset_rotated * tile_size, 0);
+ __intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset_rotated * tile_size, 0);
gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4fd4853b2250..64134947c0aa 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10);
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
vbt.t11_t12);
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 3fca9fa39a8e..8c8ead2276e0 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -406,9 +406,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
if (drm_crtc_vblank_get(&crtc->base)) {
- DRM_ERROR("vblank not available for FBC on pipe %c\n",
- pipe_name(crtc->pipe));
-
+ /* CRTC is now off, leave FBC deactivated */
mutex_lock(&fbc->lock);
work->scheduled = false;
mutex_unlock(&fbc->lock);
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 5a7cca32c0fa..04689600e337 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -187,11 +187,11 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
}
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
+ enum pipe pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ uint32_t bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
@@ -203,7 +203,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
+ enum pipe pch_transcoder = crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
@@ -215,12 +215,12 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
POSTING_READ(SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
- transcoder_name(pch_transcoder));
+ DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
+ enum pipe pch_transcoder,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -238,8 +238,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
- transcoder_name(pch_transcoder));
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
}
}
@@ -395,8 +395,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("PCH transcoder %s FIFO underrun\n",
- transcoder_name(pch_transcoder));
+ DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
}
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_SKYLAKE(dev_priv))
return true;
- if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ if (IS_KABYLAKE(dev_priv))
return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 6698826954e1..eb5827110d8f 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -649,7 +649,7 @@ static void gmbus_unlock_bus(struct i2c_adapter *adapter,
mutex_unlock(&dev_priv->gmbus_mutex);
}
-const struct i2c_lock_operations gmbus_lock_ops = {
+static const struct i2c_lock_operations gmbus_lock_ops = {
.lock_bus = gmbus_lock_bus,
.trylock_bus = gmbus_trylock_bus,
.unlock_bus = gmbus_unlock_bus,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3dc38c2ef4c3..29a3b0f5bec7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2980,7 +2980,7 @@ static void proxy_unlock_bus(struct i2c_adapter *adapter,
sdvo->i2c->lock_ops->unlock_bus(sdvo->i2c, flags);
}
-const struct i2c_lock_operations proxy_lock_ops = {
+static const struct i2c_lock_operations proxy_lock_ops = {
.lock_bus = proxy_lock_bus,
.trylock_bus = proxy_trylock_bus,
.unlock_bus = proxy_unlock_bus,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index debde2dae7bf..227309b01206 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -496,6 +496,27 @@ static int ipu_chan_assign_axi_id(int ipu_chan)
}
}
+static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride,
+ u8 *burstsize, u8 *num_bursts)
+{
+ const unsigned int width_bytes = width * cpp;
+ unsigned int npb, bursts;
+
+ /* Maximum number of pixels per burst without overshooting stride */
+ for (npb = 64 / cpp; npb > 0; --npb) {
+ if (round_up(width_bytes, npb * cpp) <= stride)
+ break;
+ }
+ *burstsize = npb;
+
+ /* Maximum number of consecutive bursts without overshooting stride */
+ for (bursts = 8; bursts > 1; bursts /= 2) {
+ if (round_up(width_bytes, npb * cpp * bursts) <= stride)
+ break;
+ }
+ *num_bursts = bursts;
+}
+
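
The helper picks the widest burst, then the longest run of back-to-back bursts, such that rounding a line up to a whole number of bursts never overshoots the stride. A worked sketch with assumed numbers:

	/* width = 1366, cpp = 4  ->  width_bytes = 5464 */
	/* tight stride 5464: round_up(5464, 64) = 5504 overshoots, and only
	 *   npb = 2 (8 bytes) fits  ->  burstsize = 2, num_bursts = 1 */
	/* padded stride 5504: round_up(5464, 64) = 5504 fits -> burstsize = 16,
	 *   and two consecutive bursts (128 bytes) fit -> num_bursts = 2 */
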
static void ipu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -509,6 +530,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
unsigned long alpha_eba = 0;
enum ipu_color_space ics;
unsigned int axi_id = 0;
+ const struct drm_format_info *info;
+ u8 burstsize, num_bursts;
+ u32 width, height;
int active;
if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
@@ -525,8 +549,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16,
- state->fb->pitches[0],
- state->fb->format->format, &eba);
+ fb->pitches[0],
+ fb->format->format, &eba);
}
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -553,11 +577,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
- ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
+ ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
/* Enable local alpha on partial plane */
- switch (state->fb->format->format) {
+ switch (fb->format->format) {
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
@@ -583,15 +607,21 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+ width = drm_rect_width(&state->src) >> 16;
+ height = drm_rect_height(&state->src) >> 16;
+ info = drm_format_info(fb->format->format);
+ ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
+ &burstsize, &num_bursts);
+
ipu_cpmem_zero(ipu_plane->ipu_ch);
- ipu_cpmem_set_resolution(ipu_plane->ipu_ch,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16);
- ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->format->format);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch, width, height);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->format->format);
+ ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, burstsize);
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id);
+
switch (fb->format->format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
@@ -631,6 +661,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_RGBX8888_A8:
case DRM_FORMAT_BGRX8888_A8:
alpha_eba = drm_plane_state_to_eba(state, 1);
+ num_bursts = 0;
dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d",
eba, alpha_eba, state->src.x1 >> 16, state->src.y1 >> 16);
@@ -644,8 +675,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->alpha_ch,
- state->fb->pitches[1]);
+ ipu_cpmem_set_stride(ipu_plane->alpha_ch, fb->pitches[1]);
ipu_cpmem_set_burstsize(ipu_plane->alpha_ch, 16);
ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 0, alpha_eba);
ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 1, alpha_eba);
@@ -657,6 +687,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+ ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
ipu_plane_enable(ipu_plane);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index d4246c9dceae..0d8d506695f9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -58,7 +58,7 @@ static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(mtk_fb->gem_obj);
+ drm_gem_object_put_unlocked(mtk_fb->gem_obj);
kfree(mtk_fb);
}
@@ -160,6 +160,6 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
return &mtk_fb->base;
unreference:
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 8ec963fff8b1..f595ac816b55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -122,7 +122,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(&mtk_gem->base);
+ drm_gem_object_put_unlocked(&mtk_gem->base);
return 0;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 5375e6dccdd7..7742c7d81ed8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -116,8 +116,6 @@ static struct drm_driver meson_driver = {
/* GEM Ops */
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_destroy = drm_gem_dumb_destroy,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 2ac3fcbfea7b..968e20379d54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -248,7 +248,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
out_unreserve1:
mgag200_bo_unreserve(pixels_2);
out_unref:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 4189160af726..74cdde2ee474 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -102,7 +102,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 9d914ca69996..30726c9fe28c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -232,7 +232,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
err_alloc_fbi:
vfree(sysram);
err_sysram:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -245,7 +245,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&mfbdev->helper);
if (mfb->obj) {
- drm_gem_object_unreference_unlocked(mfb->obj);
+ drm_gem_object_put_unlocked(mfb->obj);
mfb->obj = NULL;
}
drm_fb_helper_fini(&mfbdev->helper);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index dce8a3eb5a10..780f983b0294 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -18,7 +18,7 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
- drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_gem_object_put_unlocked(mga_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -59,13 +59,13 @@ mgag200_user_framebuffer_create(struct drm_device *dev,
mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
if (!mga_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(mga_fb);
return ERR_PTR(ret);
}
@@ -317,7 +317,7 @@ int mgag200_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -366,6 +366,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_mga_bo(obj);
*offset = mgag200_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
- select QCOM_MDT_LOADER
+ select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0e3828ed1e46..7791313405b5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -486,8 +486,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a3xx_gpu->base;
gpu = &adreno_gpu->base;
- a3xx_gpu->pdev = pdev;
-
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 85ff66cbddd6..ab60dc9e344e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -28,7 +28,6 @@
struct a3xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 19abf229b08d..58341ef6f15b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -568,8 +568,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a4xx_gpu->base;
gpu = &adreno_gpu->base;
- a4xx_gpu->pdev = pdev;
-
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index 01247204ac92..f757184328a3 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -23,7 +23,6 @@
struct a4xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..17c59d839e6f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
const struct firmware *fw;
+ struct device_node *np;
+ struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+ return -EINVAL;
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np)
+ return -ENODEV;
+
+ np = of_parse_phandle(np, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
}
/* Allocate memory for the firmware image */
- mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
+ if (mem_region)
+ memunmap(mem_region);
+
release_firmware(fw);
return ret;
}
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
- return -ENODEV;
-}
-#endif
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
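
Rather than a child device taking ownership of reserved memory, the zap path now resolves the region straight from the zap-shader node's memory-region phandle and maps it write-combined, unmapping it when done. A condensed sketch of the lookup — zap_np stands for the zap-shader child node, and the region values are assumed for illustration:

	struct device_node *np = of_parse_phandle(zap_np, "memory-region", 0);
	struct resource r;
	void *mem;

	if (!np || of_address_to_resource(np, 0, &r))
		return -EINVAL;

	/* e.g. r.start = 0x8ea00000, resource_size(&r) = SZ_2M (assumed) */
	mem = memremap(r.start, resource_size(&r), MEMREMAP_WC);
	...
	memunmap(mem);
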
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->funcs->flush(gpu);
}
-struct a5xx_hwcg {
+static const struct {
u32 offset;
u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
-static const struct {
- int (*test)(struct adreno_gpu *gpu);
- const struct a5xx_hwcg *regs;
- unsigned int count;
-} a5xx_hwcg_regs[] = {
- { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
- const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
unsigned int i;
- for (i = 0; i < count; i++)
- gpu_write(gpu, regs[i].offset, regs[i].value);
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
- gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
- gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
- if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
- _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
- a5xx_hwcg_regs[i].count);
- return;
- }
- }
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
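
Collapsing the per-chip table selection into one on/off helper gives callers a symmetric toggle: each clock-control register is written with its value or zero, and the two global registers get explicit enabled/disabled values (0xAAA8AA00/0 and 0x182/0x180). The usage pattern, as the debugfs hunk below applies it:

	a5xx_set_hwcg(gpu, false);	/* quiesce gating so reads are stable */
	adreno_show(gpu, m);
	a5xx_set_hwcg(gpu, true);
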
@@ -293,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
- struct drm_device *drm = gpu->dev;
struct drm_gem_object *bo;
void *ptr;
- bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
- if (IS_ERR(bo))
- return bo;
-
- ptr = msm_gem_get_vaddr(bo);
- if (!ptr) {
- drm_gem_object_unreference(bo);
- return ERR_PTR(-ENOMEM);
- }
-
- if (iova) {
- int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
+ ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
- if (ret) {
- drm_gem_object_unreference(bo);
- return ERR_PTR(ret);
- }
- }
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
memcpy(ptr, &fw->data[4], fw->size - 4);
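
The allocate/vmap/pin/unwind sequence repeated across a5xx and adreno_gpu is folded into one helper that returns the kernel mapping and fills in the BO and iova. A minimal call sketch, assuming the signature implied by the call sites:

	struct drm_gem_object *bo;
	uint64_t iova;
	void *ptr;

	ptr = msm_gem_kernel_new_locked(drm, size,
			MSM_BO_UNCACHED | MSM_BO_GPU_READONLY,
			gpu->aspace, &bo, &iova);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);	/* helper already unwound */
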
@@ -377,51 +354,11 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
return ret;
}
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
- struct device_node *node;
- int ret;
-
- if (dev->parent)
- return 0;
-
- /* Find the sub-node for the zap shader */
- node = of_get_child_by_name(parent->of_node, "zap-shader");
- if (!node) {
- DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
- return -ENODEV;
- }
-
- dev->parent = parent;
- dev->of_node = node;
- dev_set_name(dev, "adreno_zap_shader");
-
- ret = device_register(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
- goto out;
- }
-
- ret = of_reserved_mem_device_init(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
- device_unregister(dev);
- }
-
-out:
- if (ret)
- dev->parent = NULL;
-
- return ret;
-}
-
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct platform_device *pdev = a5xx_gpu->pdev;
+ struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -444,11 +381,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
- ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
- if (!ret)
- ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
- adreno_gpu->info->zapfw);
+ ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -462,6 +395,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
@@ -545,7 +479,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */
- a5xx_enable_hwcg(gpu);
+ a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
@@ -691,9 +625,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- if (a5xx_gpu->zap_dev.parent)
- device_unregister(&a5xx_gpu->zap_dev);
-
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -867,6 +798,27 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ gpu->funcs->last_fence(gpu),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
#define RBBM_ERROR_MASK \
(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
@@ -893,6 +845,9 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
a5xx_uche_err_irq(gpu);
@@ -920,31 +875,30 @@ static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
- 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
- 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
- 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
- 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
- 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
- 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
- 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
- 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
- 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
- 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
- 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
- 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
- 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
- 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
- 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
- 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
- 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
- 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
- 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
- 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
- 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
- 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
- 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
- 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
- ~0
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+ 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+ 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+ 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+ 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+ 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+ 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+ 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+ 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+ 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+ 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+ 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+ 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+ 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+ 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+ 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+ 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+ 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+ 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+ 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+ 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+ 0xB9A0, 0xB9BF, ~0
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +974,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+ /*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+ a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+ a5xx_set_hwcg(gpu, true);
}
#endif
@@ -1064,7 +1025,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a5xx_gpu->base;
gpu = &adreno_gpu->base;
- a5xx_gpu->pdev = pdev;
adreno_gpu->registers = a5xx_registers;
adreno_gpu->reg_offsets = a5xx_register_offsets;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..e94451685bf8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -23,7 +23,6 @@
struct a5xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
struct drm_gem_object *pm4_bo;
uint64_t pm4_iova;
@@ -36,8 +35,6 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
-
- struct device zap_dev;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +56,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
}
bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 87af6eea0483..04aab1dcae2b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
- if (IS_ERR(a5xx_gpu->gpmu_bo))
- goto err;
-
- if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
- &a5xx_gpu->gpmu_iova))
- goto err;
-
- ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
- if (!ptr)
+ ptr = msm_gem_kernel_new_locked(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+ &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+ if (IS_ERR(ptr))
goto err;
while (cmds_size > 0) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..c8b4ac254bb5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->base.fast_rate;
return 0;
case MSM_PARAM_TIMESTAMP:
- if (adreno_gpu->funcs->get_timestamp)
- return adreno_gpu->funcs->get_timestamp(gpu, value);
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
@@ -330,11 +337,6 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}
-static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
-};
-
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
@@ -366,15 +368,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ringsz = RB_SIZE;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -389,37 +391,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- if (gpu->aspace && gpu->aspace->mmu) {
- struct msm_mmu *mmu = gpu->aspace->mmu;
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret)
- return ret;
- }
+ adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+ sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+ &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
- adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
- MSM_BO_UNCACHED);
- if (IS_ERR(adreno_gpu->memptrs_bo)) {
- ret = PTR_ERR(adreno_gpu->memptrs_bo);
- adreno_gpu->memptrs_bo = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
if (IS_ERR(adreno_gpu->memptrs)) {
- dev_err(drm->dev, "could not vmap memptrs\n");
- return -ENOMEM;
- }
-
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
- &adreno_gpu->memptrs_iova);
- if (ret) {
- dev_err(drm->dev, "could not map memptrs: %d\n", ret);
- return ret;
+ ret = PTR_ERR(adreno_gpu->memptrs);
+ adreno_gpu->memptrs = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
}
- return 0;
+ return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
@@ -439,10 +421,4 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->pfp);
msm_gpu_cleanup(gpu);
-
- if (gpu->aspace) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_put(gpu->aspace);
- }
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 311c1c1e7d6c..98742d7af6dc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -161,12 +161,17 @@ static const struct of_device_id dt_match[] = {
{}
};
+static const struct dev_pm_ops dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+};
+
static struct platform_driver dsi_driver = {
.probe = dsi_dev_probe,
.remove = dsi_dev_remove,
.driver = {
.name = "msm_dsi",
.of_match_table = dt_match,
+ .pm = &dsi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 9e6017387efb..2302046197a8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -179,6 +179,8 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+int msm_dsi_runtime_suspend(struct device *dev);
+int msm_dsi_runtime_resume(struct device *dev);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..dbb31a014419 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -135,7 +135,6 @@ struct msm_dsi_host {
struct completion video_comp;
struct mutex dev_mutex;
struct mutex cmd_mutex;
- struct mutex clk_mutex;
spinlock_t intr_lock; /* Protect interrupt ctrl register */
u32 err_work_state;
@@ -221,6 +220,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
goto put_gdsc;
}
+ pm_runtime_get_sync(dev);
+
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
@@ -247,6 +248,7 @@ disable_clks:
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
+ pm_runtime_put_autosuspend(dev);
put_clk:
clk_put(ahb_clk);
put_gdsc:
@@ -455,6 +457,34 @@ static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->bus_clks[i]);
}
+int msm_dsi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ dsi_bus_clk_disable(msm_host);
+
+ return 0;
+}
+
+int msm_dsi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ return dsi_bus_clk_enable(msm_host);
+}
+
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
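
With bus clocks handled by the runtime-PM callbacks above, every former dsi_clk_ctrl(msm_host, 1)/dsi_clk_ctrl(msm_host, 0) pair becomes an explicit get/enable and disable/put, with the put autosuspending. The new pairing, as applied in the hunks below:

	pm_runtime_get_sync(&msm_host->pdev->dev);	/* resume -> bus clks on */
	ret = dsi_link_clk_enable(msm_host);		/* link clks stay explicit */
	...
	dsi_link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
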
@@ -596,35 +626,6 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
}
}
-static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
-{
- int ret = 0;
-
- mutex_lock(&msm_host->clk_mutex);
- if (enable) {
- ret = dsi_bus_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: Can not enable bus clk, %d\n",
- __func__, ret);
- goto unlock_ret;
- }
- ret = dsi_link_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: Can not enable link clk, %d\n",
- __func__, ret);
- dsi_bus_clk_disable(msm_host);
- goto unlock_ret;
- }
- } else {
- dsi_link_clk_disable(msm_host);
- dsi_bus_clk_disable(msm_host);
- }
-
-unlock_ret:
- mutex_unlock(&msm_host->clk_mutex);
- return ret;
-}
-
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
struct drm_display_mode *mode = msm_host->mode;
@@ -1699,6 +1700,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
msm_host->pdev = pdev;
+ msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
if (ret) {
@@ -1713,6 +1715,8 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
+ pm_runtime_enable(&pdev->dev);
+
msm_host->cfg_hnd = dsi_get_config(msm_host);
if (!msm_host->cfg_hnd) {
ret = -EINVAL;
@@ -1753,7 +1757,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
mutex_init(&msm_host->cmd_mutex);
- mutex_init(&msm_host->clk_mutex);
spin_lock_init(&msm_host->intr_lock);
/* setup workqueue */
@@ -1761,7 +1764,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
INIT_WORK(&msm_host->err_work, dsi_err_worker);
INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
- msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
DBG("Dsi Host %d initialized", msm_host->id);
@@ -1783,9 +1785,10 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
msm_host->workqueue = NULL;
}
- mutex_destroy(&msm_host->clk_mutex);
mutex_destroy(&msm_host->cmd_mutex);
mutex_destroy(&msm_host->dev_mutex);
+
+ pm_runtime_disable(&msm_host->pdev->dev);
}
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
@@ -1881,7 +1884,8 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdss interrupt is generated in mdp core clock domain
* mdp clock need to be enabled to receive dsi interrupt
*/
- dsi_clk_ctrl(msm_host, 1);
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ dsi_link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1911,7 +1915,8 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
@@ -2137,6 +2142,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ return;
+ }
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2153,8 +2165,11 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
* and only turned on before MDP START.
* This part of code should be enabled once mdp driver support it.
*/
- /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
- dsi_clk_ctrl(msm_host, 0); */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+ * dsi_link_clk_disable(msm_host);
+ * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ * }
+ */
return 0;
}
@@ -2210,9 +2225,11 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto unlock_ret;
}
- ret = dsi_clk_ctrl(msm_host, 1);
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ ret = dsi_link_clk_enable(msm_host);
if (ret) {
- pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+ pr_err("%s: failed to enable link clocks. ret=%d\n",
+ __func__, ret);
goto fail_disable_reg;
}
@@ -2236,7 +2253,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -2261,7 +2279,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
@@ -2280,7 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- int ret;
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2311,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM;
}
- ret = dsi_calc_clk_rate(msm_host);
- if (ret) {
- pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 0c2eb9c9a1fc..7c9bf91bc22b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -373,7 +373,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put_sync(&phy->pdev->dev);
+ pm_runtime_put_autosuspend(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a968cad509c2..17e069a133a4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -239,6 +239,8 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->pwr_clks[i] = clk;
}
+ pm_runtime_enable(&pdev->dev);
+
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 13ac822dee5d..7e357077ed26 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -35,6 +35,8 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
@@ -84,6 +86,8 @@ static void power_off(struct drm_bridge *bridge)
config->pwr_reg_names[i], ret);
}
}
+
+ pm_runtime_put_autosuspend(&hdmi->pdev->dev);
}
#define AVI_IFRAME_LINE_NUMBER 1
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 71536d9c7fe8..c0848dfedd50 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -137,6 +137,36 @@ err:
return ret;
}
+static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret;
+
+ if (enable) {
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev,
+ "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ dev_err(dev,
+ "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+ }
+ } else {
+ for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+ }
+}
+
static int hpd_enable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -167,22 +197,8 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
goto fail;
}
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev, "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- dev_err(dev, "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- goto fail;
- }
- }
+ pm_runtime_get_sync(dev);
+ enable_hpd_clocks(hdmi, true);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
@@ -225,8 +241,8 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
msm_hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_clk_cnt; i++)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put_autosuspend(dev);
ret = gpio_config(hdmi, false);
if (ret)
@@ -285,7 +301,16 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ uint32_t hpd_int_status;
+
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+ enable_hpd_clocks(hdmi, true);
+
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index aa7402e03f67..60790df91bfa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -192,6 +192,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms;
+ struct device *dev;
int intf_num;
u32 data = 0;
@@ -214,14 +215,16 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
/* Smart Panel, Sync mode */
data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+ dev = &mdp5_kms->pdev->dev;
+
/* Make sure clocks are on when connectors call this function. */
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 5e3bc7224eee..6fcb58ab718c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
- enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
@@ -415,6 +415,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
@@ -425,7 +426,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
mdp5_crtc->enabled = false;
}
@@ -436,13 +437,17 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
+
+ mdp5_crtc_mode_set_nofb(crtc);
+
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
if (mdp5_cstate->cmd_mode)
@@ -533,7 +538,7 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
-enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state,
struct drm_plane_state *bpstate)
{
@@ -727,6 +732,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, stride;
@@ -755,6 +761,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+ pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
@@ -769,6 +776,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
lm = mdp5_cstate->pipeline.mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+ pm_runtime_get_sync(&pdev->dev);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
@@ -795,6 +804,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
@@ -806,6 +817,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
+ pm_runtime_put_autosuspend(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -838,6 +850,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -849,6 +863,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
crtc_flush(crtc, flush_mask);
+ pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..5b851380d3f2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -297,9 +297,13 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
+ /* XXX: taking the mode from encoder->crtc->state here is probably not right */
+ struct drm_crtc_state *cstate = encoder->crtc->state;
+
+ mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
- mdp5_cmd_encoder_disable(encoder);
+ mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
@@ -320,7 +324,6 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
- .mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
.atomic_check = mdp5_encoder_atomic_check,
@@ -350,6 +353,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
struct mdp5_kms *mdp5_kms;
+ struct device *dev;
int intf_num;
u32 data = 0;
@@ -369,8 +373,10 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
else
return -EINVAL;
+ dev = &mdp5_kms->pdev->dev;
/* Make sure clocks are on when connectors call this function. */
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
+
/* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
@@ -378,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 3ce8b9dec9c1..bb5deb00c899 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -49,16 +49,19 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- mdp5_enable(mdp5_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
struct mdp_irq *error_handler = &mdp5_kms->error_handler;
error_handler->irq = mdp5_irq_error_handler;
@@ -67,9 +70,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
@@ -77,9 +80,11 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- mdp5_enable(mdp5_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -109,11 +114,12 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
@@ -121,9 +127,10 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
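
Every path in this file now repeats the same get/put bracket around a register access. Should the pattern keep spreading, it could be factored into a helper; a purely illustrative sketch, not part of the patch:

static void mdp5_with_power(struct mdp5_kms *mdp5_kms,
			    void (*body)(struct mdp5_kms *))
{
	struct device *dev = &mdp5_kms->pdev->dev;

	pm_runtime_get_sync(dev);
	body(mdp5_kms);			/* register access happens here */
	pm_runtime_put_autosuspend(dev);
}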
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..f7c0698fec40 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -30,11 +30,10 @@ static const char *iommu_ports[] = {
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct platform_device *pdev = mdp5_kms->pdev;
+ struct device *dev = &mdp5_kms->pdev->dev;
unsigned long flags;
- pm_runtime_get_sync(&pdev->dev);
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
/* Magic unknown register writes:
*
@@ -66,8 +65,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- mdp5_disable(mdp5_kms);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -111,8 +109,9 @@ static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
if (mdp5_kms->smp)
mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
@@ -121,11 +120,12 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
@@ -249,6 +249,9 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_kms->enable_count--;
+ WARN_ON(mdp5_kms->enable_count < 0);
+
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
@@ -262,6 +265,8 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_kms->enable_count++;
+
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
@@ -486,11 +491,12 @@ fail:
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
u32 *major, u32 *minor)
{
+ struct device *dev = &mdp5_kms->pdev->dev;
u32 version;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
@@ -502,7 +508,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name, bool mandatory)
{
struct device *dev = &pdev->dev;
- struct clk *clk = devm_clk_get(dev, name);
+ struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
@@ -643,7 +649,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(&pdev->dev);
for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
!config->hw->intf.base[i])
@@ -652,7 +658,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
- mdp5_disable(mdp5_kms);
mdelay(16);
if (config->platform.iommu) {
@@ -678,6 +683,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
+ pm_runtime_put_autosuspend(&pdev->dev);
+
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
@@ -887,21 +894,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
}
/* mandatory clocks: */
- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
- get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
@@ -1005,6 +1012,30 @@ static int mdp5_dev_remove(struct platform_device *pdev)
return 0;
}
+static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ DBG("");
+
+ return mdp5_disable(mdp5_kms);
+}
+
+static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ DBG("");
+
+ return mdp5_enable(mdp5_kms);
+}
+
+static const struct dev_pm_ops mdp5_pm_ops = {
+ SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+};
+
static const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
@@ -1019,6 +1050,7 @@ static struct platform_driver mdp5_driver = {
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
+ .pm = &mdp5_pm_ops,
},
};
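
With the dev_pm_ops wired up, the PM core drives mdp5_enable()/mdp5_disable() from the usage count; __maybe_unused keeps !CONFIG_PM builds warning-free, since SET_RUNTIME_PM_OPS() compiles to nothing there. Call-flow sketch for the ops above (assuming CONFIG_PM=y):

/*
 *   pm_runtime_get_sync(dev)
 *     -> mdp5_runtime_resume()  -> mdp5_enable()    (clocks on)
 *   pm_runtime_put_autosuspend(dev)
 *     -> (after the autosuspend delay, if one is configured)
 *     -> mdp5_runtime_suspend() -> mdp5_disable()   (clocks off)
 */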
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17caa0e8c8ae..9b3fe01089d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -76,6 +76,8 @@ struct mdp5_kms {
bool rpm_enabled;
struct mdp_irq error_handler;
+
+ int enable_count;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
@@ -167,11 +169,13 @@ struct mdp5_encoder {
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
+ WARN_ON(mdp5_kms->enable_count <= 0);
msm_writel(data, mdp5_kms->mmio + reg);
}
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
+ WARN_ON(mdp5_kms->enable_count <= 0);
return msm_readl(mdp5_kms->mmio + reg);
}
@@ -255,9 +259,6 @@ static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}
-int mdp5_disable(struct mdp5_kms *mdp5_kms);
-int mdp5_enable(struct mdp5_kms *mdp5_kms);
-
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask);
void mdp5_irq_preinstall(struct msm_kms *kms);
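
The enable_count checks turn a register access on an unclocked block, which on many SoCs hangs the bus silently, into an immediate warning with a backtrace. What the guard catches, illustratively:

	mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);	/* enable_count == 0: WARN */

	pm_runtime_get_sync(dev);			/* resume raises enable_count */
	mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);	/* fine */
	pm_runtime_put_autosuspend(dev);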
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index 9c34d7824988..f2a0db7a8a03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -31,6 +31,10 @@ struct msm_mdss {
struct regulator *vdd;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *vsync_clk;
+
struct {
volatile unsigned long enabled_mask;
struct irq_domain *domain;
@@ -140,6 +144,51 @@ static int mdss_irq_domain_init(struct msm_mdss *mdss)
return 0;
}
+int msm_mdss_enable(struct msm_mdss *mdss)
+{
+ DBG("");
+
+ clk_prepare_enable(mdss->ahb_clk);
+ if (mdss->axi_clk)
+ clk_prepare_enable(mdss->axi_clk);
+ if (mdss->vsync_clk)
+ clk_prepare_enable(mdss->vsync_clk);
+
+ return 0;
+}
+
+int msm_mdss_disable(struct msm_mdss *mdss)
+{
+ DBG("");
+
+ if (mdss->vsync_clk)
+ clk_disable_unprepare(mdss->vsync_clk);
+ if (mdss->axi_clk)
+ clk_disable_unprepare(mdss->axi_clk);
+ clk_disable_unprepare(mdss->ahb_clk);
+
+ return 0;
+}
+
+static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+{
+ struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+
+ mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdss->ahb_clk))
+ mdss->ahb_clk = NULL;
+
+ mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdss->axi_clk))
+ mdss->axi_clk = NULL;
+
+ mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdss->vsync_clk))
+ mdss->vsync_clk = NULL;
+
+ return 0;
+}
+
void msm_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -153,8 +202,6 @@ void msm_mdss_destroy(struct drm_device *dev)
regulator_disable(mdss->vdd);
- pm_runtime_put_sync(dev->dev);
-
pm_runtime_disable(dev->dev);
}
@@ -190,6 +237,12 @@ int msm_mdss_init(struct drm_device *dev)
goto fail;
}
+ ret = msm_mdss_get_clocks(mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ goto fail;
+ }
+
/* Regulator to enable GDSCs in downstream kernels */
mdss->vdd = devm_regulator_get(dev->dev, "vdd");
if (IS_ERR(mdss->vdd)) {
@@ -221,12 +274,6 @@ int msm_mdss_init(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- /*
- * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
- * domain. Remove this once runtime PM is adapted for all the devices.
- */
- pm_runtime_get_sync(dev->dev);
-
return 0;
fail_irq:
regulator_disable(mdss->vdd);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 818244ac4a4b..4b22ac3413a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -888,8 +888,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- struct phase_step step = { 0 };
- struct pixel_ext pe = { 0 };
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 58f712d37e7f..ae4983d9d0a5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -28,6 +28,13 @@ struct mdp5_smp {
int blk_cnt;
int blk_size;
+
+ /* register cache */
+ u32 alloc_w[22];
+ u32 alloc_r[22];
+ u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};
static inline
@@ -98,16 +105,15 @@ static int smp_request_block(struct mdp5_smp *smp,
static void set_fifo_thresholds(struct mdp5_smp *smp,
enum mdp5_pipe pipe, int nblks)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
u32 val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * smp_entries_per_blk) / 4;
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+ smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+ smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+ smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}
/*
@@ -222,7 +228,6 @@ void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
unsigned nblks = 0;
u32 blk, val;
@@ -231,7 +236,7 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+ val = smp->alloc_w[idx];
switch (fld) {
case 0:
@@ -248,8 +253,8 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ smp->alloc_w[idx] = val;
+ smp->alloc_r[idx] = val;
nblks++;
}
@@ -257,6 +262,39 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
return nblks;
}
+static void write_smp_alloc_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i, num_regs;
+
+ num_regs = smp->blk_cnt / 3 + 1;
+
+ for (i = 0; i < num_regs; i++) {
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
+ smp->alloc_w[i]);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
+ smp->alloc_r[i]);
+ }
+}
+
+static void write_smp_fifo_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
+ smp->pipe_reqprio_fifo_wm0[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
+ smp->pipe_reqprio_fifo_wm1[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
+ smp->pipe_reqprio_fifo_wm2[pipe]);
+ }
+}
+
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
enum mdp5_pipe pipe;
@@ -277,6 +315,9 @@ void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
set_fifo_thresholds(smp, pipe, nblks);
}
+ write_smp_alloc_regs(smp);
+ write_smp_fifo_regs(smp);
+
state->assigned = 0;
}
@@ -289,6 +330,8 @@ void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state
set_fifo_thresholds(smp, pipe, 0);
}
+ write_smp_fifo_regs(smp);
+
state->released = 0;
}
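
The shadow arrays let SMP bookkeeping run while the block is powered down; the hardware is only touched from the commit paths, which execute under the runtime-PM reference taken in mdp5_prepare_commit(). Ordering sketch (illustrative, not literal driver code):

/*
 *   smp_request_block() / set_fifo_thresholds()
 *       -> update smp->alloc_*[] and smp->pipe_reqprio_fifo_wm*[] only
 *   mdp5_prepare_commit()        -> pm_runtime_get_sync()
 *       write_smp_alloc_regs(), write_smp_fifo_regs()   <- hardware writes
 *   mdp5_complete_commit()       -> pm_runtime_put_autosuspend()
 */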
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f49f6ac5585c..606df7bea97b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -73,6 +73,10 @@ bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
+static bool modeset = true;
+MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
+module_param(modeset, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -832,7 +836,6 @@ static struct drm_driver msm_driver = {
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -879,8 +882,37 @@ static int msm_pm_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
+static int msm_runtime_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+
+ DBG("");
+
+ if (priv->mdss)
+ return msm_mdss_disable(priv->mdss);
+
+ return 0;
+}
+
+static int msm_runtime_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+
+ DBG("");
+
+ if (priv->mdss)
+ return msm_mdss_enable(priv->mdss);
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops msm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+ SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
/*
@@ -1104,6 +1136,9 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
+ if (!modeset)
+ return -EINVAL;
+
DBG("init");
msm_mdp_register();
msm_dsi_register();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index fc8d24f7c084..5e8109c07560 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -55,8 +55,6 @@ struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
-#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
-
struct msm_file_private {
/* currently we don't do anything useful with this.. but when
* per-context address spaces are supported we'd keep track of
@@ -237,6 +235,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
@@ -248,10 +252,10 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
+ int w, int h, int p, uint32_t format);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 6ecb7b170316..fc175e724ad6 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "msm_gem.h"
struct msm_framebuffer {
struct drm_framebuffer base;
@@ -28,6 +29,8 @@ struct msm_framebuffer {
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -161,7 +164,7 @@ out_unref:
return ERR_PTR(ret);
}
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -237,3 +240,43 @@ fail:
return ERR_PTR(ret);
}
+
+struct drm_framebuffer *
+msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {
+ .pixel_format = format,
+ .width = w,
+ .height = h,
+ .pitches = { p },
+ };
+ struct drm_gem_object *bo;
+ struct drm_framebuffer *fb;
+ int size;
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
+ if (IS_ERR(bo)) {
+ dev_warn(dev->dev, "could not allocate stolen bo\n");
+ /* try regular bo: */
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ }
+ if (IS_ERR(bo)) {
+ dev_err(dev->dev, "failed to allocate buffer object\n");
+ return ERR_CAST(bo);
+ }
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_CAST(fb);
+ }
+
+ return fb;
+}
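
msm_alloc_stolen_fb() wraps the try-stolen-then-fall-back allocation that fbdev used to open-code; the fbdev conversion below is the in-tree caller. A condensed usage sketch (w, h, bpp and depth stand in for caller-supplied values):

	pitch = align_pitch(w, bpp);
	fb = msm_alloc_stolen_fb(dev, w, h, pitch,
				 drm_mode_legacy_fb_format(bpp, depth));
	if (IS_ERR(fb))
		return PTR_ERR(fb);
	bo = msm_framebuffer_bo(fb, 0);		/* backing object of plane 0 */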
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 9c00fedfc741..c178563fcd4d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,7 +19,6 @@
#include <drm/drm_fb_helper.h>
#include "msm_drv.h"
-#include "msm_gem.h"
#include "msm_kms.h"
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
@@ -35,7 +34,6 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
struct msm_fbdev {
struct drm_fb_helper base;
struct drm_framebuffer *fb;
- struct drm_gem_object *bo;
};
static struct fb_ops msm_fb_ops = {
@@ -57,16 +55,16 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
- struct drm_gem_object *drm_obj = fbdev->bo;
+ struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
int ret = 0;
- ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+ ret = drm_gem_mmap_obj(bo, bo->size, vma);
if (ret) {
pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
return ret;
}
- return msm_gem_mmap_obj(drm_obj, vma);
+ return msm_gem_mmap_obj(bo, vma);
}
static int msm_fbdev_create(struct drm_fb_helper *helper,
@@ -76,47 +74,30 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
+ struct drm_gem_object *bo;
struct fb_info *fbi = NULL;
- struct drm_mode_fb_cmd2 mode_cmd = {0};
uint64_t paddr;
- int ret, size;
+ uint32_t format;
+ int ret, pitch;
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = align_pitch(
- mode_cmd.width, sizes->surface_bpp);
+ pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
+ fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
+ sizes->surface_height, pitch, format);
- /* allocate backing bo */
- size = mode_cmd.pitches[0] * mode_cmd.height;
- DBG("allocating %d bytes for fb %d", size, dev->primary->index);
- fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
- MSM_BO_WC | MSM_BO_STOLEN);
- if (IS_ERR(fbdev->bo)) {
- ret = PTR_ERR(fbdev->bo);
- fbdev->bo = NULL;
- dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
- goto fail;
- }
-
- fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
- /* note: if fb creation failed, we can't rely on fb destroy
- * to unref the bo:
- */
- drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
+ bo = msm_framebuffer_bo(fb, 0);
+
mutex_lock(&dev->struct_mutex);
/*
@@ -124,7 +105,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
@@ -152,14 +133,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
}
- fbi->screen_size = fbdev->bo->size;
+ fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
- fbi->fix.smem_len = fbdev->bo->size;
+ fbi->fix.smem_len = bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
@@ -241,7 +222,9 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb) {
- msm_gem_put_vaddr(fbdev->bo);
+ struct drm_gem_object *bo =
+ msm_framebuffer_bo(fbdev->fb, 0);
+ msm_gem_put_vaddr(bo);
drm_framebuffer_remove(fbdev->fb);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..f15821a0d900 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
struct page **pages;
vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unlock;
+ }
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
fail:
del_vma(vma);
-
+unlock:
mutex_unlock(&msm_obj->lock);
return ret;
}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
vma = add_vma(obj, NULL);
+ mutex_unlock(&msm_obj->lock);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
@@ -1018,3 +1024,49 @@ fail:
drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+ void *vaddr;
+ struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ int ret;
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ if (iova) {
+ ret = msm_gem_get_iova(obj, aspace, iova);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(ret);
+ }
+ }
+
+ vaddr = msm_gem_get_vaddr(obj);
+ if (!vaddr) {
+ msm_gem_put_iova(obj, aspace);
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (bo)
+ *bo = obj;
+
+ return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
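
The new msm_gem_kernel_new() helpers collapse the allocate, optionally pin, then vmap sequence into one call; passing NULL for the iova pointer skips the pin, which the ringbuffer conversion below relies on. Usage sketch:

	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* or defer the mapping: */
	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, NULL);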
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
goto out;
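
Two separate fixes in this hunk. The (u64) casts matter on 32-bit kernels, where both operands of the multiplication are 32-bit and the product wraps before the SIZE_MAX check can see it; an illustration with a hypothetical count:

	uint32_t nr_bos = 0x40000000;			/* 2^30 entries */
	uint32_t entry = 8;				/* sizeof(submit->bos[0]) stand-in */
	uint64_t wrapped = nr_bos * entry;		/* 32-bit math: wraps to 0 */
	uint64_t safe = (uint64_t)nr_bos * entry;	/* 0x200000000, check fires */

The second fix tests MSM_SUBMIT_NO_IMPLICIT against args->flags, where userspace actually passes it, instead of the unrelated args->fence field.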
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
{
- if (!vma->iova)
+ if (!aspace || !vma->iova)
return;
if (aspace->mmu) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 9f3dbc236ab3..ffbff27600e0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -562,11 +562,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
+ uint64_t va_start, uint64_t va_end)
+{
+ struct iommu_domain *iommu;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ /*
+ * Setup IOMMU.. eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
+
+ dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace)) {
+ dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ PTR_ERR(aspace));
+ iommu_domain_free(iommu);
+ return ERR_CAST(aspace);
+ }
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+ if (ret) {
+ msm_gem_address_space_put(aspace);
+ return ERR_PTR(ret);
+ }
+
+ return aspace;
+}
+
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
- struct iommu_domain *iommu;
int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
@@ -636,28 +674,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- /* Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (iommu) {
- iommu->geometry.aperture_start = config->va_start;
- iommu->geometry.aperture_end = config->va_end;
-
- dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->aspace = msm_gem_address_space_create(&pdev->dev,
- iommu, "gpu");
- if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
- dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->aspace = NULL;
- iommu_domain_free(iommu);
- goto fail;
- }
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
+ bs_init(gpu);
- } else {
+ gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
+ config->va_start, config->va_end);
+
+ if (gpu->aspace == NULL)
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ else if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
+ goto fail;
}
/* Create ringbuffer: */
@@ -669,14 +698,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- gpu->pdev = pdev;
- platform_set_drvdata(pdev, gpu);
-
- bs_init(gpu);
-
return 0;
fail:
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -693,7 +718,9 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
-
- if (gpu->fctx)
- msm_fence_context_free(gpu->fctx);
+ if (gpu->aspace) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+ NULL, 0);
+ msm_gem_address_space_put(gpu->aspace);
+ }
}
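
msm_gpu_create_address_space() folds the IOMMU setup into one helper with a three-way return that the caller above depends on:

/*
 *   NULL          - no IOMMU available; fall back to the VRAM carveout
 *   IS_ERR(ptr)   - IOMMU present but create/attach failed; abort init
 *   valid pointer - address space ready for use
 */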
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index a8f2ba5e5f07..17d5824417ad 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -99,5 +99,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
+int msm_mdss_enable(struct msm_mdss *mdss);
+int msm_mdss_disable(struct msm_mdss *mdss);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 791bca3c6a9c..bf065a540130 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
- if (IS_ERR(ring->bo)) {
- ret = PTR_ERR(ring->bo);
- ring->bo = NULL;
- goto fail;
- }
- ring->start = msm_gem_get_vaddr(ring->bo);
+ /* Pass NULL for the iova pointer - we will map it later */
+ ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+ gpu->aspace, &ring->bo, NULL);
+
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
+ ring->start = NULL;
goto fail;
}
ring->end = ring->start + (size / 4);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 93c38eb6d187..7fbad9cb656e 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -337,8 +337,6 @@ static struct drm_driver mxsfb_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 8f689f1f6122..6aa6ee16dcbd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1096,6 +1096,38 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.disable = nv_crtc_disable,
};
+static const uint32_t modeset_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+};
+
+static struct drm_plane *
+create_primary_plane(struct drm_device *dev)
+{
+ struct drm_plane *primary;
+ int ret;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL) {
+ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+ return NULL;
+ }
+
+ /* possible_crtcs will be filled in later by crtc_init */
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &drm_primary_helper_funcs,
+ modeset_formats,
+ ARRAY_SIZE(modeset_formats), NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ kfree(primary);
+ primary = NULL;
+ }
+
+ return primary;
+}
+
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
@@ -1114,7 +1146,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nv_crtc->save = nv_crtc_save;
nv_crtc->restore = nv_crtc_restore;
- drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
+ drm_crtc_init_with_planes(dev, &nv_crtc->base,
+ create_primary_plane(dev), NULL,
+ &nv04_crtc_funcs, NULL);
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index e54944d23268..c8c2333f24ee 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -63,6 +63,7 @@ static uint32_t formats[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
};
/* Sine can be approximated with
@@ -90,6 +91,26 @@ cos_mul(int degrees, int factor)
}
static int
+verify_scaling(const struct drm_framebuffer *fb, uint8_t shift,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
+ uint32_t crtc_w, uint32_t crtc_h)
+{
+ if (crtc_w < (src_w >> shift) || crtc_h < (src_h >> shift)) {
+ DRM_DEBUG_KMS("Unsuitable framebuffer scaling: %dx%d -> %dx%d\n",
+ src_w, src_h, crtc_w, crtc_h);
+ return -ERANGE;
+ }
+
+ if (src_x != 0 || src_y != 0) {
+ DRM_DEBUG_KMS("Unsuitable framebuffer offset: %d,%d\n",
+ src_x, src_y);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -107,7 +128,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
bool flip = nv_plane->flip;
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
- int format, ret;
+ unsigned shift = drm->client.device.info.chipset >= 0x30 ? 1 : 3;
+ unsigned format = 0;
+ int ret;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
@@ -115,18 +138,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_w >>= 16;
src_h >>= 16;
- format = ALIGN(src_w * 4, 0x100);
-
- if (format > 0xffff)
- return -ERANGE;
-
- if (drm->client.device.info.chipset >= 0x30) {
- if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
- return -ERANGE;
- } else {
- if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
- return -ERANGE;
- }
+ ret = verify_scaling(fb, shift, 0, 0, src_w, src_h, crtc_w, crtc_h);
+ if (ret)
+ return ret;
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
@@ -146,21 +160,23 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
- if (fb->format->format != DRM_FORMAT_UYVY)
+ if (fb->format->format == DRM_FORMAT_YUYV ||
+ fb->format->format == DRM_FORMAT_NV12)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
- if (fb->format->format == DRM_FORMAT_NV12)
+ if (fb->format->format == DRM_FORMAT_NV12 ||
+ fb->format->format == DRM_FORMAT_NV21)
format |= NV_PVIDEO_FORMAT_PLANAR;
if (nv_plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
- if (fb->format->format == DRM_FORMAT_NV12) {
+ if (format & NV_PVIDEO_FORMAT_PLANAR) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
nv_fb->nvbo->bo.offset + fb->offsets[1]);
}
- nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+ nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_STOP, 0);
/* TODO: wait for vblank? */
nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
@@ -357,7 +373,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nouveau_bo *cur = nv_plane->cur;
uint32_t overlay = 1;
int brightness = (nv_plane->brightness - 512) * 62 / 512;
- int pitch, ret, i;
+ int ret, i;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
@@ -365,17 +381,9 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_w >>= 16;
src_h >>= 16;
- pitch = ALIGN(src_w * 4, 0x100);
-
- if (pitch > 0xffff)
- return -ERANGE;
-
- /* TODO: Compute an offset? Not sure how to do this for YUYV. */
- if (src_x != 0 || src_y != 0)
- return -ERANGE;
-
- if (crtc_w < src_w || crtc_h < src_h)
- return -ERANGE;
+ ret = verify_scaling(fb, 0, src_x, src_y, src_w, src_h, crtc_w, crtc_h);
+ if (ret)
+ return ret;
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
@@ -389,8 +397,9 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
for (i = 0; i < 2; i++) {
nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
- nv_fb->nvbo->bo.offset);
- nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+ nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
+ fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
}
nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index e8e77ee24776..deb477282dde 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -18,6 +18,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_HDMI_C = 0x63,
DCB_CONNECTOR_DMS59_DP0 = 0x64,
DCB_CONNECTOR_DMS59_DP1 = 0x65,
+ DCB_CONNECTOR_WFD = 0x70,
DCB_CONNECTOR_NONE = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
index 4892a65ddd48..903d117603d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
@@ -6,6 +6,7 @@ enum dcb_output_type {
DCB_OUTPUT_TMDS = 0x2,
DCB_OUTPUT_LVDS = 0x3,
DCB_OUTPUT_DP = 0x6,
+ DCB_OUTPUT_WFD = 0x8,
DCB_OUTPUT_EOL = 0xe,
DCB_OUTPUT_UNUSED = 0xf,
DCB_OUTPUT_ANY = -1,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index b268b96faece..1bfd93b85575 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -96,4 +96,5 @@ int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b998c33af18a..dd6fba55ad5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -351,11 +351,8 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
struct lvdstableheader lth;
if (bios->fp.fptablepointer == 0x0) {
- /* Apple cards don't have the fp table; the laptops use DDC */
- /* The table is also missing on some x86 IGPs */
-#ifndef __powerpc__
- NV_ERROR(drm, "Pointer to flat panel table invalid\n");
-#endif
+ /* Most laptop cards lack an fp table. They use DDC. */
+ NV_DEBUG(drm, "Pointer to flat panel table invalid\n");
bios->digital_min_front_porch = 0x4b;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 196eb668d30d..70d8e0d69ad5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1147,8 +1147,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
- if (msg->size == 0)
- return msg->size;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
@@ -1186,6 +1184,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_HDMI_0 :
case DCB_CONNECTOR_HDMI_1 :
case DCB_CONNECTOR_HDMI_C : return DRM_MODE_CONNECTOR_HDMIA;
+ case DCB_CONNECTOR_WFD : return DRM_MODE_CONNECTOR_VIRTUAL;
default:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index b9a109be989c..2e7785f49e6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -231,9 +231,30 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_bo *nvbo,
struct nouveau_framebuffer **pfb)
{
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_framebuffer *fb;
int ret;
+ /* YUV overlays have special requirements pre-NV50 */
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
+ (mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
+ mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
+ mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
+ mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
+ (mode_cmd->pitches[0] & 0x3f || /* align 64 */
+ mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
+ (mode_cmd->pitches[1] && /* pitches for planes must match */
+ mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Unsuitable framebuffer: format: %s; pitches: 0x%x\n 0x%x\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->pitches[0],
+ mode_cmd->pitches[1]);
+ return -EINVAL;
+ }
+
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
@@ -407,7 +428,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- struct drm_crtc *crtc;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -416,10 +436,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
drm_crtc_force_disable_all(dev);
}
- /* Make sure that drm and hw vblank irqs get properly disabled. */
- drm_for_each_crtc(crtc, dev)
- drm_crtc_vblank_off(crtc);
-
/* disable flip completion events */
nvif_notify_put(&drm->flip);
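
The new check rejects overlay-incompatible framebuffers at creation time instead of failing later in the plane-update paths. The constraints, as encoded above:

/*
 * Pre-NV50 YUV framebuffer rules:
 *   - pitches[0] must be 64-byte aligned   (pitches[0] & 0x3f == 0)
 *   - pitches[0] must be under 64 KiB      (hardware pitch field width)
 *   - for two-plane NV12/NV21, pitches[1] must match pitches[0]
 */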
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index df7e2037031a..595630d1fb9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -585,18 +585,18 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "suspending console...\n");
+ NV_DEBUG(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
- NV_INFO(drm, "suspending display...\n");
+ NV_DEBUG(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev, runtime);
if (ret)
return ret;
}
- NV_INFO(drm, "evicting buffers...\n");
+ NV_DEBUG(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
- NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+ NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
@@ -609,7 +609,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
goto fail_display;
}
- NV_INFO(drm, "suspending fence...\n");
+ NV_DEBUG(drm, "suspending fence...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm)) {
ret = -ENOMEM;
@@ -617,7 +617,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
}
- NV_INFO(drm, "suspending object tree...\n");
+ NV_DEBUG(drm, "suspending object tree...\n");
ret = nvif_client_suspend(&drm->client.base);
if (ret)
goto fail_client;
@@ -630,7 +630,7 @@ fail_client:
fail_display:
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
+ NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
}
return ret;
@@ -641,19 +641,19 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- NV_INFO(drm, "resuming object tree...\n");
+ NV_DEBUG(drm, "resuming object tree...\n");
nvif_client_resume(&drm->client.base);
- NV_INFO(drm, "resuming fence...\n");
+ NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
nouveau_run_vbios_init(dev);
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
+ NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
- NV_INFO(drm, "resuming console...\n");
+ NV_DEBUG(drm, "resuming console...\n");
nouveau_fbcon_set_suspend(dev, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 999c35a25498..b0ad7fcefcf5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -179,7 +179,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
}
static void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
{
}
@@ -252,7 +253,8 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
}
static void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
{
}
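
[editor's note] The stubs above only adapt to the new debug-hook signature; the
point of the TTM change is that output now goes through a struct drm_printer
rather than raw printk with a prefix string. A non-empty hook would look roughly
like this (body hypothetical, fields from struct ttm_mem_type_manager):

	#include <drm/drm_print.h>

	static void
	example_gart_manager_debug(struct ttm_mem_type_manager *man,
				   struct drm_printer *printer)
	{
		/* drm_printf() routes to a seq_file, dmesg, etc.,
		 * depending on how the printer was constructed */
		drm_printf(printer, "  size: 0x%llx, use_type: %d\n",
			   (unsigned long long)man->size, man->use_type);
	}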
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 6dee4071bb3f..2dbf62a2ac41 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3141,7 +3141,7 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
- for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto; i++)
+ for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
@@ -3658,15 +3658,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
- nv_encoder->i2c = &nv_connector->aux.ddc;
+ if (disp->disp->oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
+ }
nv_encoder->aux = aux;
}
/*TODO: Use DP Info Table to check for support. */
- if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ if (disp->disp->oclass >= GF110_DISP) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
nv_connector->base.base.id,
&nv_encoder->dp.mstm);
@@ -3888,7 +3897,7 @@ static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct drm_crtc *crtc;
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
@@ -3909,12 +3918,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&disp->mutex);
/* Disable head(s). */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
+ if (old_crtc_state->active && !new_crtc_state->active)
+ drm_crtc_vblank_off(crtc);
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -3989,7 +4000,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Update head(s). */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
@@ -4000,11 +4011,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_head_flush_set(head, asyh);
interlock_core = 1;
}
- }
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (new_crtc_state->event)
- drm_crtc_vblank_get(crtc);
+ if (new_crtc_state->active) {
+ if (!old_crtc_state->active)
+ drm_crtc_vblank_on(crtc);
+ if (new_crtc_state->event)
+ drm_crtc_vblank_get(crtc);
+ }
}
/* Update plane(s). */
@@ -4051,12 +4064,15 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
if (new_crtc_state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
- drm_crtc_accurate_vblank_count(crtc);
+ if (new_crtc_state->active)
+ drm_crtc_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
new_crtc_state->event = NULL;
- drm_crtc_vblank_put(crtc);
+ if (new_crtc_state->active)
+ drm_crtc_vblank_put(crtc);
}
}
@@ -4435,11 +4451,13 @@ nv50_display_create(struct drm_device *dev)
/* create crtc objects to represent the hw heads */
if (disp->disp->oclass >= GF110_DISP)
- crtcs = nvif_rd32(&device->object, 0x022448);
+ crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
else
- crtcs = 2;
+ crtcs = 0x3;
- for (i = 0; i < crtcs; i++) {
+ for (i = 0; i < fls(crtcs); i++) {
+ if (!(crtcs & (1 << i)))
+ continue;
ret = nv50_head_create(dev, i);
if (ret)
goto out;
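
[editor's note] Register 0x612004 reports heads as a presence bitmask, so the
creation loop changes from a plain count to a bit walk bounded by fls(). A
minimal sketch of the pattern (mask value and helper illustrative):

	u32 crtcs = 0x5;	/* e.g. heads 0 and 2 fitted, head 1 fused off */
	int i;

	for (i = 0; i < fls(crtcs); i++) {	/* fls() = index of highest set bit + 1 */
		if (!(crtcs & (1 << i)))
			continue;		/* skip absent heads */
		create_head(i);			/* hypothetical per-head setup */
	}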
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7bdc7a5ae723..e096a5d9c292 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2043,6 +2043,7 @@ nv120_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .therm = gm200_therm_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2077,6 +2078,7 @@ nv124_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .therm = gm200_therm_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2111,6 +2113,7 @@ nv126_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .therm = gm200_therm_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2321,6 +2324,35 @@ nv137_chipset = {
};
static const struct nvkm_device_chip
+nv138_chipset = {
+ .name = "GP108",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
.bar = gk20a_bar_new,
@@ -2782,6 +2814,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x134: device->chip = &nv134_chipset; break;
case 0x136: device->chip = &nv136_chipset; break;
case 0x137: device->chip = &nv137_chipset; break;
+ case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..93a75e5b2791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
/* Create output path objects for each VBIOS display path. */
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (ver < 0x40) /* No support for chipsets prior to NV50. */
+ break;
if (dcbE.type == DCB_OUTPUT_UNUSED)
continue;
if (dcbE.type == DCB_OUTPUT_EOL)
@@ -283,6 +285,10 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
case DCB_OUTPUT_DP:
ret = nvkm_dp_new(disp, i, &dcbE, &outp);
break;
+ case DCB_OUTPUT_WFD:
+ /* No support for WFD yet. */
+ ret = -ENODEV;
+ continue;
default:
nvkm_warn(subdev, "dcb %d type %d unknown\n",
i, dcbE.type);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
index b33552757647..9fd7ae331308 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
@@ -92,5 +92,8 @@ gf119_head = {
int
gf119_head_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ if (!(nvkm_rd32(device, 0x612004) & (0x00000001 << id)))
+ return 0;
return nvkm_head_new_(&gf119_head, disp, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a24312fb0228..a1e8bf48b778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -22,6 +22,7 @@ struct nvkm_ior {
unsigned proto_evo:4;
enum nvkm_ior_proto {
CRT,
+ TV,
TMDS,
LVDS,
DP,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 19c635663399..6ea19466f436 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -22,7 +22,7 @@ struct nv50_disp {
u8 type[3];
} pior;
- struct nv50_disp_chan *chan[17];
+ struct nv50_disp_chan *chan[21];
};
void nv50_disp_super_1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 85aff85394ac..be9e7f8c3b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
case 0:
switch (outp->info.type) {
case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
+ case DCB_OUTPUT_TV : *type = DAC; return TV;
case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
case DCB_OUTPUT_DP : *type = SOR; return DP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 8a8895246d26..7fea7d45202f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -124,6 +124,8 @@ nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
static bool
nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
+ struct nv31_mpeg *mpeg = nv31_mpeg(device->mpeg);
+ struct nvkm_subdev *subdev = &mpeg->engine.subdev;
u32 inst = data << 4;
u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
@@ -132,8 +134,11 @@ nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
u32 size = dma1 + 1;
/* only allow linear DMA objects */
- if (!(dma0 & 0x00002000))
+ if (!(dma0 & 0x00002000)) {
+ nvkm_error(subdev, "inst %08x dma0 %08x dma1 %08x dma2 %08x\n",
+ inst, dma0, dma1, dma2);
return false;
+ }
if (mthd == 0x0190) {
/* DMA_CMD */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 16de5bd94b14..b5ec7c504dc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -31,6 +31,8 @@ bool
nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
struct nvkm_instmem *imem = device->imem;
+ struct nv31_mpeg *mpeg = nv31_mpeg(device->mpeg);
+ struct nvkm_subdev *subdev = &mpeg->engine.subdev;
u32 inst = data << 4;
u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
@@ -39,8 +41,11 @@ nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
u32 size = dma1 + 1;
/* only allow linear DMA objects */
- if (!(dma0 & 0x00002000))
+ if (!(dma0 & 0x00002000)) {
+ nvkm_error(subdev, "inst %08x dma0 %08x dma1 %08x dma2 %08x\n",
+ inst, dma0, dma1, dma2);
return false;
+ }
if (mthd == 0x0190) {
/* DMA_CMD */
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index d45d7947a964..77273b53672c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -251,7 +251,7 @@ cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
struct nvkm_msgqueue_queue *queue)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
- static unsigned long timeout = ~0;
+ static unsigned timeout = 2000;
unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
int ret = -EAGAIN;
bool commit = true;
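
[editor's note] The old ~0 initializer made msecs_to_jiffies() return an
effectively infinite deadline; 2000 turns the wait into a real two-second
budget. A minimal sketch of the jiffies idiom cmd_write() relies on (retry
helper hypothetical):

	unsigned long end = jiffies + msecs_to_jiffies(2000);
	int ret = -EAGAIN;

	while (ret == -EAGAIN && time_before(jiffies, end))
		ret = try_queue_command();	/* hypothetical retry step */
	if (ret == -EAGAIN)
		ret = -ETIMEDOUT;		/* budget exhausted */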
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..676c167c95b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -24,6 +24,7 @@
#include "gf100.h"
#include <core/gpuobj.h>
+#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
@@ -59,6 +60,8 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
return ret;
bar_len = device->func->resource_size(device, bar_nr);
+ if (bar_nr == 3 && bar->bar2_halve)
+ bar_len >>= 1;
ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
if (ret)
@@ -129,7 +132,9 @@ gf100_bar_init(struct nvkm_bar *base)
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ if (bar->bar2_halve)
+ addr |= 0x40000000;
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
@@ -161,6 +166,7 @@ gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
nvkm_bar_ctor(func, device, index, &bar->base);
+ bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false);
*pbar = &bar->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index f7dea69640d8..20a5255362ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -11,6 +11,7 @@ struct gf100_bar_vm {
struct gf100_bar {
struct nvkm_bar base;
+ bool bar2_halve;
struct gf100_bar_vm bar[2];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 3841ad6be99e..a239e73562c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -60,12 +60,12 @@ gf100_fb_oneinit(struct nvkm_fb *base)
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->base.mmu_rd);
+ true, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->base.mmu_wr);
+ true, &fb->base.mmu_wr);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 48f01e40b8fc..b768e66a472b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
nvkm-y += nvkm/subdev/i2c/aux.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgf119.o
nvkm-y += nvkm/subdev/i2c/auxgm200.o
nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index d172e42dd228..4c1f547da463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -117,6 +117,10 @@ int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
u32 addr, u8 *data, u8 *size)
{
+ if (!*size && !aux->func->address_only) {
+ AUX_ERR(aux, "address-only transaction dropped");
+ return -ENOSYS;
+ }
return aux->func->xfer(aux, retry, type, addr, data, size);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 27a4a39c87f0..9587ab456d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,7 @@
#include "pad.h"
struct nvkm_i2c_aux_func {
+ bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
+int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int, u8, struct nvkm_i2c_aux **);
+
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
+int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
#define AUX_MSG(b,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index ab8cb196c34e..c8ab1b5741a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
return 0;
}
-static int
+int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -160,14 +160,10 @@ out:
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
-static const struct nvkm_i2c_aux_func
-g94_i2c_aux_func = {
- .xfer = g94_i2c_aux_xfer,
-};
-
int
-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
- struct nvkm_i2c_aux **paux)
+g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
{
struct g94_i2c_aux *aux;
@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
return -ENOMEM;
*paux = &aux->base;
- nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+ nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux = {
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
new file mode 100644
index 000000000000..dab40cd8fe3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "aux.h"
+
+static const struct nvkm_i2c_aux_func
+gf119_i2c_aux = {
+ .address_only = true,
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ee091fa79628..7ef60895f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00d954 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -162,6 +162,7 @@ out:
static const struct nvkm_i2c_aux_func
gm200_i2c_aux_func = {
+ .address_only = true,
.xfer = gm200_i2c_aux_xfer,
};
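
[editor's note] The g94 and gm200 paths now encode zero-byte transfers the same
way; a sketch of the control-word packing as used in both hunks, where bit 8
requests an address-only cycle and bits 7:0 carry the byte count minus one:

	u32 ctrl = 0;

	ctrl |= type << 12;		/* transaction type */
	if (size)
		ctrl |= size - 1;	/* bits 7:0 = byte count - 1 */
	else
		ctrl |= 0x00000100;	/* bit 8 = address-only */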
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index d53212f1aa52..3bc4d0310076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -28,7 +28,7 @@
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index d2c4d6033abb..f93766418056 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -27,6 +27,7 @@ static const struct nvkm_mc_map
gf100_mc_reset[] = {
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00002000, NVKM_SUBDEV_PMU, true },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00000080, NVKM_ENGINE_CE1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index eb9b278198b2..a4cb82495cee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -192,6 +192,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
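
[editor's note] The endianness guard only changes the compiled-in default; the
NvMSI config option still has the last word because nvkm_boolopt() takes the
current value as its fallback. A minimal sketch of the pattern (default value
illustrative):

	bool msi = true;	/* driver default */

#ifdef __BIG_ENDIAN
	msi = false;		/* new default on big-endian systems */
#endif
	/* a user-supplied NvMSI=0/1 overrides either default */
	msi = nvkm_boolopt(device->cfgopt, "NvMSI", msi);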
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 3306f9fe7140..ce70a193caa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -75,7 +75,7 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
+ if (!pmu->func->enabled(pmu))
return 0;
/* Inhibit interrupts, and wait for idle. */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 0e36d4cb7201..0b458656e870 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,13 +24,30 @@
#include "priv.h"
#include "fuc/gf100.fuc3.h"
+#include <subdev/mc.h>
+
+void
+gf100_pmu_reset(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ nvkm_mc_disable(device, NVKM_SUBDEV_PMU);
+ nvkm_mc_enable(device, NVKM_SUBDEV_PMU);
+}
+
+bool
+gf100_pmu_enabled(struct nvkm_pmu *pmu)
+{
+ return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU);
+}
+
static const struct nvkm_pmu_func
gf100_pmu = {
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 0e4ba4248b15..3dfa79d4fb13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -30,7 +30,8 @@ gf119_pmu = {
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
.data.size = sizeof(gf119_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 2ad858d825ac..8f7ec10fd2a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -109,7 +109,8 @@ gk104_pmu = {
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index fc4b8ecfdaeb..345741d55a56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -88,7 +88,8 @@ gk110_pmu = {
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index e9a91277683a..e4acf7876ea1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -30,7 +30,8 @@ gk208_pmu = {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 978aae3c1001..05e81855c367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -196,9 +196,10 @@ gk20a_dvfs_data= {
static const struct nvkm_pmu_func
gk20a_pmu = {
+ .enabled = gf100_pmu_enabled,
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
- .reset = gt215_pmu_reset,
+ .reset = gf100_pmu_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 9a248ed75f09..459df1ef9e70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -32,7 +32,8 @@ gm107_pmu = {
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
.data.size = sizeof(gm107_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 44bef22bce52..31c843145c7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -38,6 +38,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gm20b_pmu = {
+ .enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
index 6c41c20c85a7..e210cd6af816 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -25,7 +25,8 @@
static const struct nvkm_pmu_func
gp100_pmu = {
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index f017352206c9..98c7a2a8afc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -31,8 +31,15 @@ gp102_pmu_reset(struct nvkm_pmu *pmu)
nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000);
}
+static bool
+gp102_pmu_enabled(struct nvkm_pmu *pmu)
+{
+ return !(nvkm_rd32(pmu->subdev.device, 0x10a3c0) & 0x00000001);
+}
+
static const struct nvkm_pmu_func
gp102_pmu = {
+ .enabled = gp102_pmu_enabled,
.reset = gp102_pmu_reset,
};
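
[editor's note] With every chipset supplying an enabled() hook, the common
reset path can consult the function table instead of hard-coding the 0x000200
enable bit that only older parts use. A minimal sketch of the dispatch this
series converges on (reduced from nvkm_pmu_reset() as patched above):

	if (!pmu->func->enabled(pmu))
		return 0;		/* PMU already off: nothing to quiesce */
	/* ...inhibit interrupts and wait for idle... */
	pmu->func->reset(pmu);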
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 90d428b3be97..e04216daea58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -180,13 +180,19 @@ gt215_pmu_fini(struct nvkm_pmu *pmu)
nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
}
-void
+static void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
+ nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
+ nvkm_rd32(device, 0x022210);
+}
+
+static bool
+gt215_pmu_enabled(struct nvkm_pmu *pmu)
+{
+ return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
}
int
@@ -241,6 +247,7 @@ gt215_pmu = {
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
+ .enabled = gt215_pmu_enabled,
.reset = gt215_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 096cba069f72..a4c48a10cd47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -20,6 +20,7 @@ struct nvkm_pmu_func {
u32 size;
} data;
+ bool (*enabled)(struct nvkm_pmu *);
void (*reset)(struct nvkm_pmu *);
int (*init)(struct nvkm_pmu *);
void (*fini)(struct nvkm_pmu *);
@@ -30,12 +31,14 @@ struct nvkm_pmu_func {
void (*pgob)(struct nvkm_pmu *, bool);
};
-void gt215_pmu_reset(struct nvkm_pmu *);
int gt215_pmu_init(struct nvkm_pmu *);
void gt215_pmu_fini(struct nvkm_pmu *);
void gt215_pmu_intr(struct nvkm_pmu *);
void gt215_pmu_recv(struct nvkm_pmu *);
int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+bool gf100_pmu_enabled(struct nvkm_pmu *);
+void gf100_pmu_reset(struct nvkm_pmu *);
+
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 135758ba3e28..2bafcc1d1818 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/therm/g84.o
nvkm-y += nvkm/subdev/therm/gt215.o
nvkm-y += nvkm/subdev/therm/gf119.o
nvkm-y += nvkm/subdev/therm/gm107.o
+nvkm-y += nvkm/subdev/therm/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 86e81930d8ee..96f8da40ac82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -203,7 +203,7 @@ g84_therm_fini(struct nvkm_therm *therm)
nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
}
-static void
+void
g84_therm_init(struct nvkm_therm *therm)
{
g84_sensor_setup(therm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
new file mode 100644
index 000000000000..73dc78093d5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+static const struct nvkm_therm_func
+gm200_therm = {
+ .init = g84_therm_init,
+ .fini = g84_therm_fini,
+ .temp_get = g84_temp_get,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gm200_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gm200_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 235a5d8daff6..1f46e371d7c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -111,6 +111,7 @@ void g84_therm_fini(struct nvkm_therm *);
int gt215_therm_fan_sense(struct nvkm_therm *);
+void g84_therm_init(struct nvkm_therm *);
void gf119_therm_init(struct nvkm_therm *);
int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index e93b2410c38b..ddb2b2c600ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -83,7 +83,7 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
{
struct nvkm_subdev *subdev = &therm->subdev;
bool active;
- const char *thresolds[] = {
+ static const char * const thresholds[] = {
"fanboost", "downclock", "critical", "shutdown"
};
int temperature = therm->func->temp_get(therm);
@@ -94,10 +94,10 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
if (dir == NVKM_THERM_THRS_FALLING)
nvkm_info(subdev,
"temperature (%i C) went below the '%s' threshold\n",
- temperature, thresolds[thrs]);
+ temperature, thresholds[thrs]);
else
nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
- temperature, thresolds[thrs]);
+ temperature, thresholds[thrs]);
active = (dir == NVKM_THERM_THRS_RISING);
switch (thrs) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index e1fa143a5625..542a76503fbd 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -198,6 +198,9 @@ static int tvc_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 79cb69f1acf5..d9d25df6fc1b 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/mutex.h>
#include <drm/drm_edid.h>
@@ -37,6 +38,10 @@ static const struct videomode hdmic_default_vm = {
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
+ void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
+ void *hpd_cb_data;
+ bool hpd_enabled;
+ struct mutex hpd_lock;
struct device *dev;
@@ -167,6 +172,70 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
return in->ops.hdmi->detect(in);
}
+static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
+ return 0;
+ } else if (in->ops.hdmi->register_hpd_cb) {
+ return in->ops.hdmi->register_hpd_cb(in, cb, cb_data);
+ }
+
+ return -ENOTSUPP;
+}
+
+static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
+ } else if (in->ops.hdmi->unregister_hpd_cb) {
+ in->ops.hdmi->unregister_hpd_cb(in);
+ }
+}
+
+static void hdmic_enable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = true;
+ mutex_unlock(&ddata->hpd_lock);
+ } else if (in->ops.hdmi->enable_hpd) {
+ in->ops.hdmi->enable_hpd(in);
+ }
+}
+
+static void hdmic_disable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = false;
+ mutex_unlock(&ddata->hpd_lock);
+ } else if (in->ops.hdmi->disable_hpd) {
+ in->ops.hdmi->disable_hpd(in);
+ }
+}
+
static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -197,10 +266,34 @@ static struct omap_dss_driver hdmic_driver = {
.read_edid = hdmic_read_edid,
.detect = hdmic_detect,
+ .register_hpd_cb = hdmic_register_hpd_cb,
+ .unregister_hpd_cb = hdmic_unregister_hpd_cb,
+ .enable_hpd = hdmic_enable_hpd,
+ .disable_hpd = hdmic_disable_hpd,
.set_hdmi_mode = hdmic_set_hdmi_mode,
.set_hdmi_infoframe = hdmic_set_infoframe,
};
+static irqreturn_t hdmic_hpd_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+
+ mutex_lock(&ddata->hpd_lock);
+ if (ddata->hpd_enabled && ddata->hpd_cb) {
+ enum drm_connector_status status;
+
+ if (hdmic_detect(&ddata->dssdev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ ddata->hpd_cb(ddata->hpd_cb_data, status);
+ }
+ mutex_unlock(&ddata->hpd_lock);
+
+ return IRQ_HANDLED;
+}
+
static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -246,11 +339,22 @@ static int hdmic_probe(struct platform_device *pdev)
if (r)
return r;
+ mutex_init(&ddata->hpd_lock);
+
if (gpio_is_valid(ddata->hpd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
GPIOF_DIR_IN, "hdmi_hpd");
if (r)
goto err_reg;
+
+ r = devm_request_threaded_irq(&pdev->dev,
+ gpio_to_irq(ddata->hpd_gpio),
+ NULL, hdmic_hpd_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "hdmic hpd", ddata);
+ if (r)
+ goto err_reg;
}
ddata->vm = hdmic_default_vm;
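
[editor's note] The new ops give omapdrm an interrupt-driven hot-plug path in
place of polling. A hypothetical consumer would wire them up roughly like this
(callback body and names illustrative, not the actual omapdrm implementation):

	static void my_hpd_cb(void *data, enum drm_connector_status status)
	{
		/* e.g. kick drm_kms_helper_hotplug_event() for the device */
	}

	if (dssdev->driver->register_hpd_cb &&
	    dssdev->driver->register_hpd_cb(dssdev, my_hpd_cb, priv) == 0)
		dssdev->driver->enable_hpd(dssdev);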
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 58276a48112e..a9e9d667c55e 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -15,12 +15,17 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <linux/mutex.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
+ void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
+ void *hpd_cb_data;
+ bool hpd_enabled;
+ struct mutex hpd_lock;
struct gpio_desc *ct_cp_hpd_gpio;
struct gpio_desc *ls_oe_gpio;
@@ -162,6 +167,49 @@ static bool tpd_detect(struct omap_dss_device *dssdev)
return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
+static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
+
+ return 0;
+}
+
+static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void tpd_enable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = true;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void tpd_disable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = false;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
static int tpd_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -193,10 +241,34 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.read_edid = tpd_read_edid,
.detect = tpd_detect,
+ .register_hpd_cb = tpd_register_hpd_cb,
+ .unregister_hpd_cb = tpd_unregister_hpd_cb,
+ .enable_hpd = tpd_enable_hpd,
+ .disable_hpd = tpd_disable_hpd,
.set_infoframe = tpd_set_infoframe,
.set_hdmi_mode = tpd_set_hdmi_mode,
};
+static irqreturn_t tpd_hpd_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+
+ mutex_lock(&ddata->hpd_lock);
+ if (ddata->hpd_enabled && ddata->hpd_cb) {
+ enum drm_connector_status status;
+
+ if (tpd_detect(&ddata->dssdev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ ddata->hpd_cb(ddata->hpd_cb_data, status);
+ }
+ mutex_unlock(&ddata->hpd_lock);
+
+ return IRQ_HANDLED;
+}
+
static int tpd_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +333,15 @@ static int tpd_probe(struct platform_device *pdev)
ddata->hpd_gpio = gpio;
+ mutex_init(&ddata->hpd_lock);
+
+ r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
+ NULL, tpd_hpd_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tpd12s015 hpd", ddata);
+ if (r)
+ goto err_gpio;
+
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
dssdev->dev = &pdev->dev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 6468a765f3d1..e065f7e10cca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -231,6 +231,9 @@ static int panel_dpi_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 76787a75a4dc..92c556ac22c7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -554,7 +554,7 @@ static struct attribute *dsicm_attrs[] = {
NULL,
};
-static struct attribute_group dsicm_attr_group = {
+static const struct attribute_group dsicm_attr_group = {
.attrs = dsicm_attrs,
};
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index c90474afaebd..74d13969b9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,7 +19,7 @@
#include "../dss/omapdss.h"
-static struct videomode lb035q02_vm = {
+static const struct videomode lb035q02_vm = {
.hactive = 320,
.vactive = 240,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 346aefdb015f..8e5bff4e5226 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -503,7 +503,7 @@ static struct attribute *bldev_attrs[] = {
NULL,
};
-static struct attribute_group bldev_attr_group = {
+static const struct attribute_group bldev_attr_group = {
.attrs = bldev_attrs,
};
@@ -720,6 +720,9 @@ static int acx565akm_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->mode = SPI_MODE_3;
ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index cbf4c67c4933..0a38a0e8c925 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -40,7 +40,7 @@ struct panel_drv_data {
struct spi_device *spi_dev;
};
-static struct videomode td028ttec1_panel_vm = {
+static const struct videomode td028ttec1_panel_vm = {
.hactive = 480,
.vactive = 640,
.pixelclock = 22153000,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 20c6d8fe215a..ac4a6d4d134c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -282,7 +282,7 @@ static struct attribute *tpo_td043_attrs[] = {
NULL,
};
-static struct attribute_group tpo_td043_attr_group = {
+static const struct attribute_group tpo_td043_attr_group = {
.attrs = tpo_td043_attrs,
};
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 688195e448c5..142ce5a02542 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -5,7 +5,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
-omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o \
+omapdss-y := core.o dss.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index bdce4bfdf6e0..197ddbc1512b 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -24,182 +24,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
-#include <linux/slab.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
-
-static struct {
- struct platform_device *pdev;
-} core;
-
-enum omapdss_version omapdss_get_version(void)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
- return pdata->version;
-}
-EXPORT_SYMBOL(omapdss_get_version);
-
-int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
-{
- struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
-
- if (!board_data->dsi_enable_pads)
- return -ENOENT;
-
- return board_data->dsi_enable_pads(dsi_id, lane_mask);
-}
-
-void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
-{
- struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
-
- if (!board_data->dsi_disable_pads)
- return;
-
- return board_data->dsi_disable_pads(dsi_id, lane_mask);
-}
-
-int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
-
- if (pdata->set_min_bus_tput)
- return pdata->set_min_bus_tput(dev, tput);
- else
- return 0;
-}
-
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-static int dss_debug_show(struct seq_file *s, void *unused)
-{
- void (*func)(struct seq_file *) = s->private;
- func(s);
- return 0;
-}
-
-static int dss_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dss_debug_show, inode->i_private);
-}
-
-static const struct file_operations dss_debug_fops = {
- .open = dss_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static struct dentry *dss_debugfs_dir;
-
-static int dss_initialize_debugfs(void)
-{
- dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
- if (IS_ERR(dss_debugfs_dir)) {
- int err = PTR_ERR(dss_debugfs_dir);
- dss_debugfs_dir = NULL;
- return err;
- }
-
- debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
- &dss_debug_dump_clocks, &dss_debug_fops);
-
- return 0;
-}
-
-static void dss_uninitialize_debugfs(void)
-{
- if (dss_debugfs_dir)
- debugfs_remove_recursive(dss_debugfs_dir);
-}
-
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
-{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
- write, &dss_debug_fops);
-
- return PTR_ERR_OR_ZERO(d);
-}
-#else /* CONFIG_OMAP2_DSS_DEBUGFS */
-static inline int dss_initialize_debugfs(void)
-{
- return 0;
-}
-static inline void dss_uninitialize_debugfs(void)
-{
-}
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
-{
- return 0;
-}
-#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
-
-/* PLATFORM DEVICE */
-
-static void dss_disable_all_devices(void)
-{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- dssdev->driver->disable(dssdev);
- }
-}
-
-static int __init omap_dss_probe(struct platform_device *pdev)
-{
- int r;
-
- core.pdev = pdev;
-
- dss_features_init(omapdss_get_version());
-
- r = dss_initialize_debugfs();
- if (r)
- goto err_debugfs;
-
- return 0;
-
-err_debugfs:
-
- return r;
-}
-
-static int omap_dss_remove(struct platform_device *pdev)
-{
- dss_uninitialize_debugfs();
-
- return 0;
-}
-
-static void omap_dss_shutdown(struct platform_device *pdev)
-{
- DSSDBG("shutdown\n");
- dss_disable_all_devices();
-}
-
-static struct platform_driver omap_dss_driver = {
- .remove = omap_dss_remove,
- .shutdown = omap_dss_shutdown,
- .driver = {
- .name = "omapdss",
- },
-};
/* INIT */
static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
@@ -236,21 +64,25 @@ static void (*dss_output_drv_unreg_funcs[])(void) = {
dss_uninit_platform_driver,
};
+static struct platform_device *omap_drm_device;
+
static int __init omap_dss_init(void)
{
int r;
int i;
- r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
- if (r)
- return r;
-
for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
r = dss_output_drv_reg_funcs[i]();
if (r)
goto err_reg;
}
+ omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ if (IS_ERR(omap_drm_device)) {
+ r = PTR_ERR(omap_drm_device);
+ goto err_reg;
+ }
+
return 0;
err_reg:
@@ -259,8 +91,6 @@ err_reg:
++i)
dss_output_drv_unreg_funcs[i]();
- platform_driver_unregister(&omap_dss_driver);
-
return r;
}
@@ -268,10 +98,10 @@ static void __exit omap_dss_exit(void)
{
int i;
+ platform_device_unregister(omap_drm_device);
+
for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i)
dss_output_drv_unreg_funcs[i]();
-
- platform_driver_unregister(&omap_dss_driver);
}
module_init(omap_dss_init);
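
[editor's note] With the omapdss platform driver and its debugfs plumbing gone,
module init only has to create the "omapdrm" device and tear it down on exit. A
minimal sketch of that lifecycle, assuming nothing beyond the core platform API:

	static struct platform_device *drm_dev;

	static int __init example_init(void)
	{
		drm_dev = platform_device_register_simple("omapdrm", 0, NULL, 0);
		return PTR_ERR_OR_ZERO(drm_dev);
	}

	static void __exit example_exit(void)
	{
		platform_device_unregister(drm_dev);
	}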
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index fd7504b37e3b..0f4fdb221498 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -39,13 +39,14 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_blend.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
#include "dispc.h"
/* DISPC */
@@ -63,6 +64,33 @@ enum omap_burst_size {
#define REG_FLD_MOD(idx, val, start, end) \
dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
+/* DISPC hardware feature ids */
+enum dispc_feature_id {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_MGR_LCD2,
+ FEAT_MGR_LCD3,
+ FEAT_LINEBUFFERSPLIT,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+ /* Independent core clk divider */
+ FEAT_CORE_CLK_DIV,
+ FEAT_HANDLE_UV_SEPARATE,
+ FEAT_ATTR2,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FIXED_ZORDER,
+ FEAT_ALPHA_FREE_ZORDER,
+ FEAT_FIFO_MERGE,
+ /* An unknown HW bug causing the normal FIFO thresholds not to work */
+ FEAT_OMAP3_DSI_FIFO_BUG,
+ FEAT_BURST_2D,
+ FEAT_MFLAG,
+};
+
struct dispc_features {
u8 sw_start;
u8 fp_start;
@@ -76,6 +104,9 @@ struct dispc_features {
u16 mgr_height_max;
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
+ unsigned int max_downscale;
+ unsigned int max_line_width;
+ unsigned int min_pcd;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
@@ -86,6 +117,16 @@ struct dispc_features {
u16 width, u16 height, u16 out_width, u16 out_height,
bool mem_to_mem);
u8 num_fifos;
+ const enum dispc_feature_id *features;
+ unsigned int num_features;
+ const struct dss_reg_field *reg_fields;
+ const unsigned int num_reg_fields;
+ const enum omap_overlay_caps *overlay_caps;
+ const u32 **supported_color_modes;
+ unsigned int num_mgrs;
+ unsigned int num_ovls;
+ unsigned int buffer_size_unit;
+ unsigned int burst_size_unit;
/* swap GFX & WB fifos */
bool gfx_fifo_workaround:1;
@@ -180,6 +221,17 @@ enum mgr_reg_fields {
DISPC_MGR_FLD_NUM,
};
+/* DISPC register field identifiers */
+enum dispc_feat_reg_field {
+ FEAT_REG_FIRHINC,
+ FEAT_REG_FIRVINC,
+ FEAT_REG_FIFOHIGHTHRESHOLD,
+ FEAT_REG_FIFOLOWTHRESHOLD,
+ FEAT_REG_FIFOSIZE,
+ FEAT_REG_HORIZONTALACCU,
+ FEAT_REG_VERTICALACCU,
+};
+
struct dispc_reg_field {
u16 reg;
u8 high;
@@ -343,6 +395,38 @@ static void mgr_fld_write(enum omap_channel channel,
spin_unlock_irqrestore(&dispc.control_lock, flags);
}
+static int dispc_get_num_ovls(void)
+{
+ return dispc.feat->num_ovls;
+}
+
+static int dispc_get_num_mgrs(void)
+{
+ return dispc.feat->num_mgrs;
+}
+
+static void dispc_get_reg_field(enum dispc_feat_reg_field id,
+ u8 *start, u8 *end)
+{
+ if (id >= dispc.feat->num_reg_fields)
+ BUG();
+
+ *start = dispc.feat->reg_fields[id].start;
+ *end = dispc.feat->reg_fields[id].end;
+}
+
+static bool dispc_has_feature(enum dispc_feature_id id)
+{
+ unsigned int i;
+
+ for (i = 0; i < dispc.feat->num_features; i++) {
+ if (dispc.feat->features[i] == id)
+ return true;
+ }
+
+ return false;
+}
+
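[editor note] dispc_has_feature() above replaces the global dss_has_feature() lookup with a linear scan over the per-SoC feature array. A standalone model of the idiom (illustrative names, userspace C):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum feat_id { FEAT_A, FEAT_B, FEAT_C };

/* the per-SoC descriptor lists only the features this variant has */
static const enum feat_id soc_features[] = { FEAT_A, FEAT_C };

static bool has_feature(enum feat_id id)
{
	size_t i;

	for (i = 0; i < sizeof(soc_features) / sizeof(soc_features[0]); i++)
		if (soc_features[i] == id)
			return true;

	return false;
}

int main(void)
{
	printf("FEAT_B: %d, FEAT_C: %d\n",
	       has_feature(FEAT_B), has_feature(FEAT_C)); /* prints 0, 1 */
	return 0;
}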
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
@@ -358,19 +442,19 @@ static void dispc_save_context(void)
SR(CONTROL);
SR(CONFIG);
SR(LINE_NUMBER);
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
SR(GLOBAL_ALPHA);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dispc_has_feature(FEAT_MGR_LCD2)) {
SR(CONTROL2);
SR(CONFIG2);
}
- if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(FEAT_MGR_LCD3)) {
SR(CONTROL3);
SR(CONFIG3);
}
- for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ for (i = 0; i < dispc_get_num_mgrs(); i++) {
SR(DEFAULT_COLOR(i));
SR(TRANS_COLOR(i));
SR(SIZE_MGR(i));
@@ -385,14 +469,14 @@ static void dispc_save_context(void)
SR(DATA_CYCLE2(i));
SR(DATA_CYCLE3(i));
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
SR(CPR_COEF_R(i));
SR(CPR_COEF_G(i));
SR(CPR_COEF_B(i));
}
}
- for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 0; i < dispc_get_num_ovls(); i++) {
SR(OVL_BA0(i));
SR(OVL_BA1(i));
SR(OVL_POSITION(i));
@@ -401,7 +485,7 @@ static void dispc_save_context(void)
SR(OVL_FIFO_THRESHOLD(i));
SR(OVL_ROW_INC(i));
SR(OVL_PIXEL_INC(i));
- if (dss_has_feature(FEAT_PRELOAD))
+ if (dispc_has_feature(FEAT_PRELOAD))
SR(OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
SR(OVL_WINDOW_SKIP(i));
@@ -422,12 +506,12 @@ static void dispc_save_context(void)
for (j = 0; j < 5; j++)
SR(OVL_CONV_COEF(i, j));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
SR(OVL_FIR_COEF_V(i, j));
}
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(i));
SR(OVL_BA1_UV(i));
SR(OVL_FIR2(i));
@@ -443,11 +527,11 @@ static void dispc_save_context(void)
for (j = 0; j < 8; j++)
SR(OVL_FIR_COEF_V2(i, j));
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(i));
}
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
dispc.ctx_valid = true;
@@ -468,15 +552,15 @@ static void dispc_restore_context(void)
/*RR(CONTROL);*/
RR(CONFIG);
RR(LINE_NUMBER);
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
RR(GLOBAL_ALPHA);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
RR(CONFIG2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
RR(CONFIG3);
- for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ for (i = 0; i < dispc_get_num_mgrs(); i++) {
RR(DEFAULT_COLOR(i));
RR(TRANS_COLOR(i));
RR(SIZE_MGR(i));
@@ -491,14 +575,14 @@ static void dispc_restore_context(void)
RR(DATA_CYCLE2(i));
RR(DATA_CYCLE3(i));
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
RR(CPR_COEF_R(i));
RR(CPR_COEF_G(i));
RR(CPR_COEF_B(i));
}
}
- for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 0; i < dispc_get_num_ovls(); i++) {
RR(OVL_BA0(i));
RR(OVL_BA1(i));
RR(OVL_POSITION(i));
@@ -507,7 +591,7 @@ static void dispc_restore_context(void)
RR(OVL_FIFO_THRESHOLD(i));
RR(OVL_ROW_INC(i));
RR(OVL_PIXEL_INC(i));
- if (dss_has_feature(FEAT_PRELOAD))
+ if (dispc_has_feature(FEAT_PRELOAD))
RR(OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
RR(OVL_WINDOW_SKIP(i));
@@ -528,12 +612,12 @@ static void dispc_restore_context(void)
for (j = 0; j < 5; j++)
RR(OVL_CONV_COEF(i, j));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
RR(OVL_FIR_COEF_V(i, j));
}
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(i));
RR(OVL_BA1_UV(i));
RR(OVL_FIR2(i));
@@ -549,18 +633,18 @@ static void dispc_restore_context(void)
for (j = 0; j < 8; j++)
RR(OVL_FIR_COEF_V2(i, j));
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(i));
}
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
/* enable last, because LCD & DIGIT enable are here */
RR(CONTROL);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
RR(CONTROL2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
RR(CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT);
@@ -779,7 +863,7 @@ static void dispc_ovl_write_color_conv_coef(enum omap_plane_id plane,
static void dispc_setup_color_conv_coef(void)
{
int i;
- int num_ovl = dss_feat_get_num_ovls();
+ int num_ovl = dispc_get_num_ovls();
const struct color_conv_coef ctbl_bt601_5_ovl = {
/* YUV -> RGB */
298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
@@ -868,10 +952,10 @@ static void dispc_ovl_enable_zorder_planes(void)
{
int i;
- if (!dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (!dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
return;
- for (i = 0; i < dss_feat_get_num_ovls(); i++)
+ for (i = 0; i < dispc_get_num_ovls(); i++)
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}
@@ -994,7 +1078,7 @@ static bool format_is_yuv(u32 fourcc)
static void dispc_ovl_configure_burst_type(enum omap_plane_id plane,
enum omap_dss_rotation_type rotation_type)
{
- if (dss_has_feature(FEAT_BURST_2D) == 0)
+ if (dispc_has_feature(FEAT_BURST_2D) == 0)
return;
if (rotation_type == OMAP_DSS_ROT_TILER)
@@ -1025,7 +1109,7 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
}
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dispc_has_feature(FEAT_MGR_LCD2)) {
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
chan = 0;
@@ -1040,7 +1124,7 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
chan2 = 1;
break;
case OMAP_DSS_CHANNEL_LCD3:
- if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(FEAT_MGR_LCD3)) {
chan = 0;
chan2 = 2;
} else {
@@ -1089,7 +1173,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
if (FLD_GET(val, shift, shift) == 1)
return OMAP_DSS_CHANNEL_DIGIT;
- if (!dss_has_feature(FEAT_MGR_LCD2))
+ if (!dispc_has_feature(FEAT_MGR_LCD2))
return OMAP_DSS_CHANNEL_LCD;
switch (FLD_GET(val, 31, 30)) {
@@ -1128,7 +1212,7 @@ static void dispc_configure_burst_sizes(void)
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
- for (i = 0; i < dss_feat_get_num_ovls(); ++i)
+ for (i = 0; i < dispc_get_num_ovls(); ++i)
dispc_ovl_set_burst_size(i, burst_size);
if (dispc.feat->has_writeback)
dispc_ovl_set_burst_size(OMAP_DSS_WB, burst_size);
@@ -1136,19 +1220,28 @@ static void dispc_configure_burst_sizes(void)
static u32 dispc_ovl_get_burst_size(enum omap_plane_id plane)
{
- unsigned unit = dss_feat_get_burst_size_unit();
/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
- return unit * 8;
+ return dispc.feat->burst_size_unit * 8;
}
-static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
+static bool dispc_ovl_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
{
- return dss_feat_get_supported_color_modes(plane);
+ const u32 *modes;
+ unsigned int i;
+
+ modes = dispc.feat->supported_color_modes[plane];
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ return true;
+ }
+
+ return false;
}
-static int dispc_get_num_ovls(void)
+static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
{
- return dss_feat_get_num_ovls();
+ return dispc.feat->supported_color_modes[plane];
}
static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
@@ -1223,9 +1316,9 @@ static void dispc_init_fifos(void)
u32 unit;
int i;
- unit = dss_feat_get_buffer_size_unit();
+ unit = dispc.feat->buffer_size_unit;
- dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
+ dispc_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end);
@@ -1265,7 +1358,7 @@ static void dispc_init_fifos(void)
/*
* Setup default fifo thresholds.
*/
- for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+ for (i = 0; i < dispc_get_num_ovls(); ++i) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
@@ -1307,7 +1400,7 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
- unit = dss_feat_get_buffer_size_unit();
+ unit = dispc.feat->buffer_size_unit;
WARN_ON(low % unit != 0);
WARN_ON(high % unit != 0);
@@ -1315,8 +1408,8 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
low /= unit;
high /= unit;
- dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
- dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
+ dispc_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
+ dispc_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n",
plane,
@@ -1335,14 +1428,14 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
* large for the preload field, set the threshold to the maximum value
* that can be held by the preload register
*/
- if (dss_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
+ if (dispc_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
plane != OMAP_DSS_WB)
dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
}
void dispc_enable_fifomerge(bool enable)
{
- if (!dss_has_feature(FEAT_FIFO_MERGE)) {
+ if (!dispc_has_feature(FEAT_FIFO_MERGE)) {
WARN_ON(enable);
return;
}
@@ -1360,7 +1453,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
* buffer_units, and the fifo thresholds must be buffer_unit aligned.
*/
- unsigned buf_unit = dss_feat_get_buffer_size_unit();
+ unsigned buf_unit = dispc.feat->buffer_size_unit;
unsigned ovl_fifo_size, total_fifo_size, burst_size;
int i;
@@ -1369,7 +1462,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
if (use_fifomerge) {
total_fifo_size = 0;
- for (i = 0; i < dss_feat_get_num_ovls(); ++i)
+ for (i = 0; i < dispc_get_num_ovls(); ++i)
total_fifo_size += dispc_ovl_get_fifo_size(i);
} else {
total_fifo_size = ovl_fifo_size;
@@ -1381,7 +1474,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
* combined fifo size
*/
- if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+ if (manual_update && dispc_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else if (plane == OMAP_DSS_WB) {
@@ -1435,9 +1528,9 @@ static void dispc_init_mflag(void)
(1 << 0) | /* MFLAG_CTRL = force always on */
(0 << 2)); /* MFLAG_START = disable */
- for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+ for (i = 0; i < dispc_get_num_ovls(); ++i) {
u32 size = dispc_ovl_get_fifo_size(i);
- u32 unit = dss_feat_get_buffer_size_unit();
+ u32 unit = dispc.feat->buffer_size_unit;
u32 low, high;
dispc_ovl_set_mflag(i, true);
@@ -1456,7 +1549,7 @@ static void dispc_init_mflag(void)
if (dispc.feat->has_writeback) {
u32 size = dispc_ovl_get_fifo_size(OMAP_DSS_WB);
- u32 unit = dss_feat_get_buffer_size_unit();
+ u32 unit = dispc.feat->buffer_size_unit;
u32 low, high;
dispc_ovl_set_mflag(OMAP_DSS_WB, true);
@@ -1483,10 +1576,8 @@ static void dispc_ovl_set_fir(enum omap_plane_id plane,
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
u8 hinc_start, hinc_end, vinc_start, vinc_end;
- dss_feat_get_reg_field(FEAT_REG_FIRHINC,
- &hinc_start, &hinc_end);
- dss_feat_get_reg_field(FEAT_REG_FIRVINC,
- &vinc_start, &vinc_end);
+ dispc_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end);
+ dispc_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end);
val = FLD_VAL(vinc, vinc_start, vinc_end) |
FLD_VAL(hinc, hinc_start, hinc_end);
@@ -1503,8 +1594,8 @@ static void dispc_ovl_set_vid_accu0(enum omap_plane_id plane, int haccu,
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
@@ -1518,8 +1609,8 @@ static void dispc_ovl_set_vid_accu1(enum omap_plane_id plane, int haccu,
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
@@ -1671,14 +1762,14 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
l |= five_taps ? (1 << 21) : 0;
/* VRESIZECONF and HRESIZECONF */
- if (dss_has_feature(FEAT_RESIZECONF)) {
+ if (dispc_has_feature(FEAT_RESIZECONF)) {
l &= ~(0x3 << 7);
l |= (orig_width <= out_width) ? 0 : (1 << 7);
l |= (orig_height <= out_height) ? 0 : (1 << 8);
}
/* LINEBUFFERSPLIT */
- if (dss_has_feature(FEAT_LINEBUFFERSPLIT)) {
+ if (dispc_has_feature(FEAT_LINEBUFFERSPLIT)) {
l &= ~(0x1 << 22);
l |= five_taps ? (1 << 22) : 0;
}
@@ -1713,7 +1804,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
int scale_y = out_height != orig_height;
bool chroma_upscale = plane != OMAP_DSS_WB;
- if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
+ if (!dispc_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
if (!format_is_yuv(fourcc)) {
@@ -1860,11 +1951,11 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
vidrot = 1;
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
- if (dss_has_feature(FEAT_ROWREPEATENABLE))
+ if (dispc_has_feature(FEAT_ROWREPEATENABLE))
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
- if (dss_feat_color_mode_supported(plane, DRM_FORMAT_NV12)) {
+ if (dispc_ovl_color_mode_supported(plane, DRM_FORMAT_NV12)) {
bool doublestride =
fourcc == DRM_FORMAT_NV12 &&
rotation_type == OMAP_DSS_ROT_TILER &&
@@ -2118,8 +2209,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
int error;
u16 in_width, in_height;
int min_factor = min(*decim_x, *decim_y);
- const int maxsinglelinewidth =
- dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
+ const int maxsinglelinewidth = dispc.feat->max_line_width;
*five_taps = false;
@@ -2163,8 +2253,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
{
int error;
u16 in_width, in_height;
- const int maxsinglelinewidth =
- dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
+ const int maxsinglelinewidth = dispc.feat->max_line_width;
do {
in_height = height / *decim_y;
@@ -2249,9 +2338,8 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
u16 in_height = height / *decim_y;
- const int maxsinglelinewidth =
- dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
- const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ const int maxsinglelinewidth = dispc.feat->max_line_width;
+ const int maxdownscale = dispc.feat->max_downscale;
if (mem_to_mem) {
in_width_max = out_width * maxdownscale;
@@ -2311,7 +2399,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
int *x_predecim, int *y_predecim, u16 pos_x,
enum omap_dss_rotation_type rotation_type, bool mem_to_mem)
{
- const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ const int maxdownscale = dispc.feat->max_downscale;
const int max_decim_limit = 16;
unsigned long core_clk = 0;
int decim_x, decim_y, ret;
@@ -2332,7 +2420,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
} else {
*x_predecim = max_decim_limit;
*y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
- dss_has_feature(FEAT_BURST_2D)) ?
+ dispc_has_feature(FEAT_BURST_2D)) ?
2 : max_decim_limit;
}
@@ -2428,7 +2516,7 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
out_height);
}
- if (!dss_feat_color_mode_supported(plane, fourcc))
+ if (!dispc_ovl_color_mode_supported(plane, fourcc))
return -EINVAL;
r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
@@ -2549,7 +2637,7 @@ static int dispc_ovl_setup(enum omap_plane_id plane,
enum omap_channel channel)
{
int r;
- enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
+ enum omap_overlay_caps caps = dispc.feat->overlay_caps[plane];
const bool replication = true;
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
@@ -2647,12 +2735,12 @@ static int dispc_ovl_enable(enum omap_plane_id plane, bool enable)
static enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
{
- return dss_feat_get_supported_outputs(channel);
+ return dss_get_supported_outputs(channel);
}
static void dispc_lcd_enable_signal_polarity(bool act_high)
{
- if (!dss_has_feature(FEAT_LCDENABLEPOL))
+ if (!dispc_has_feature(FEAT_LCDENABLEPOL))
return;
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
@@ -2660,7 +2748,7 @@ static void dispc_lcd_enable_signal_polarity(bool act_high)
void dispc_lcd_enable_signal(bool enable)
{
- if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
+ if (!dispc_has_feature(FEAT_LCDENABLESIGNAL))
return;
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
@@ -2668,17 +2756,12 @@ void dispc_lcd_enable_signal(bool enable)
void dispc_pck_free_enable(bool enable)
{
- if (!dss_has_feature(FEAT_PCKFREEENABLE))
+ if (!dispc_has_feature(FEAT_PCKFREEENABLE))
return;
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-static int dispc_get_num_mgrs(void)
-{
- return dss_feat_get_num_mgrs();
-}
-
static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
@@ -2718,7 +2801,7 @@ static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
bool enable)
{
- if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
+ if (!dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return;
if (ch == OMAP_DSS_CHANNEL_LCD)
@@ -2735,7 +2818,7 @@ static void dispc_mgr_setup(enum omap_channel channel,
dispc_mgr_enable_trans_key(channel, info->trans_enabled);
dispc_mgr_enable_alpha_fixed_zorder(channel,
info->partial_alpha_enabled);
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
dispc_mgr_enable_cpr(channel, info->cpr_enable);
dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
}
@@ -3013,7 +3096,7 @@ static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- if (!dss_has_feature(FEAT_CORE_CLK_DIV) &&
+ if (!dispc_has_feature(FEAT_CORE_CLK_DIV) &&
channel == OMAP_DSS_CHANNEL_LCD)
dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
}
@@ -3168,7 +3251,7 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
- if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
seq_printf(s, "- DISPC-CORE-CLK -\n");
l = dispc_read_reg(DISPC_DIVISOR);
lcd = FLD_GET(l, 23, 16);
@@ -3179,9 +3262,9 @@ void dispc_dump_clocks(struct seq_file *s)
dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3);
dispc_runtime_put();
@@ -3221,18 +3304,18 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CAPABLE);
DUMPREG(DISPC_LINE_STATUS);
DUMPREG(DISPC_LINE_NUMBER);
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
DUMPREG(DISPC_GLOBAL_ALPHA);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dispc_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_CONTROL2);
DUMPREG(DISPC_CONFIG2);
}
- if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(FEAT_MGR_LCD3)) {
DUMPREG(DISPC_CONTROL3);
DUMPREG(DISPC_CONFIG3);
}
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
@@ -3245,7 +3328,7 @@ static void dispc_dump_regs(struct seq_file *s)
p_names = mgr_names;
/* DISPC channel specific registers */
- for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ for (i = 0; i < dispc_get_num_mgrs(); i++) {
DUMPREG(i, DISPC_DEFAULT_COLOR);
DUMPREG(i, DISPC_TRANS_COLOR);
DUMPREG(i, DISPC_SIZE_MGR);
@@ -3262,7 +3345,7 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_DATA_CYCLE2);
DUMPREG(i, DISPC_DATA_CYCLE3);
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
DUMPREG(i, DISPC_CPR_COEF_R);
DUMPREG(i, DISPC_CPR_COEF_G);
DUMPREG(i, DISPC_CPR_COEF_B);
@@ -3271,7 +3354,7 @@ static void dispc_dump_regs(struct seq_file *s)
p_names = ovl_names;
- for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 0; i < dispc_get_num_ovls(); i++) {
DUMPREG(i, DISPC_OVL_BA0);
DUMPREG(i, DISPC_OVL_BA1);
DUMPREG(i, DISPC_OVL_POSITION);
@@ -3282,9 +3365,9 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_ROW_INC);
DUMPREG(i, DISPC_OVL_PIXEL_INC);
- if (dss_has_feature(FEAT_PRELOAD))
+ if (dispc_has_feature(FEAT_PRELOAD))
DUMPREG(i, DISPC_OVL_PRELOAD);
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
if (i == OMAP_DSS_GFX) {
@@ -3297,14 +3380,14 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
DUMPREG(i, DISPC_OVL_ACCU0);
DUMPREG(i, DISPC_OVL_ACCU1);
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(i, DISPC_OVL_BA0_UV);
DUMPREG(i, DISPC_OVL_BA1_UV);
DUMPREG(i, DISPC_OVL_FIR2);
DUMPREG(i, DISPC_OVL_ACCU2_0);
DUMPREG(i, DISPC_OVL_ACCU2_1);
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
}
@@ -3319,21 +3402,21 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_ROW_INC);
DUMPREG(i, DISPC_OVL_PIXEL_INC);
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
DUMPREG(i, DISPC_OVL_FIR);
DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
DUMPREG(i, DISPC_OVL_ACCU0);
DUMPREG(i, DISPC_OVL_ACCU1);
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(i, DISPC_OVL_BA0_UV);
DUMPREG(i, DISPC_OVL_BA1_UV);
DUMPREG(i, DISPC_OVL_FIR2);
DUMPREG(i, DISPC_OVL_ACCU2_0);
DUMPREG(i, DISPC_OVL_ACCU2_1);
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
}
@@ -3349,7 +3432,7 @@ static void dispc_dump_regs(struct seq_file *s)
/* Video pipeline coefficient registers */
/* start from OMAP_DSS_VIDEO1 */
- for (i = 1; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 1; i < dispc_get_num_ovls(); i++) {
for (j = 0; j < 8; j++)
DUMPREG(i, DISPC_OVL_FIR_COEF_H, j);
@@ -3359,12 +3442,12 @@ static void dispc_dump_regs(struct seq_file *s)
for (j = 0; j < 5; j++)
DUMPREG(i, DISPC_OVL_CONV_COEF, j);
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
DUMPREG(i, DISPC_OVL_FIR_COEF_V, j);
}
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
for (j = 0; j < 8; j++)
DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j);
@@ -3397,7 +3480,7 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-bool dispc_div_calc(unsigned long dispc,
+bool dispc_div_calc(unsigned long dispc_freq,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data)
{
@@ -3415,19 +3498,19 @@ bool dispc_div_calc(unsigned long dispc,
min_fck_per_pck = 0;
#endif
- pckd_hw_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
- pckd_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
+ pckd_hw_min = dispc.feat->min_pcd;
+ pckd_hw_max = 255;
- lck_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ lck_max = dss_get_max_fck_rate();
pck_min = pck_min ? pck_min : 1;
pck_max = pck_max ? pck_max : ULONG_MAX;
- lckd_start = max(DIV_ROUND_UP(dispc, lck_max), 1ul);
- lckd_stop = min(dispc / pck_min, 255ul);
+ lckd_start = max(DIV_ROUND_UP(dispc_freq, lck_max), 1ul);
+ lckd_stop = min(dispc_freq / pck_min, 255ul);
for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) {
- lck = dispc / lckd;
+ lck = dispc_freq / lckd;
pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min);
pckd_stop = min(lck / pck_min, pckd_hw_max);
@@ -3441,7 +3524,7 @@ bool dispc_div_calc(unsigned long dispc,
* also. Thus we need to use the calculated lck. For
* OMAP4+ the DISPC fclk is a separate clock.
*/
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV))
fck = dispc_core_clk_rate();
else
fck = lck;
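[editor note] The dispc -> dispc_freq parameter rename above is not cosmetic: the old parameter name shadowed the file-scope dispc state struct, which this function now dereferences for dispc.feat->min_pcd. A minimal illustration of the shadowing problem (hypothetical names):

#include <stdio.h>

struct state { int feat; };
static struct state dispc = { .feat = 42 };

/* with a parameter named "dispc", the struct would be unreachable here:
 * "dispc.feat" would fail to compile inside the function body */
static int renamed(unsigned long dispc_freq)
{
	return dispc.feat + (int)(dispc_freq / 1000000);
}

int main(void)
{
	printf("%d\n", renamed(266000000)); /* 42 + 266 = 308 */
	return 0;
}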
@@ -3556,10 +3639,10 @@ static void dispc_restore_gamma_tables(void)
dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
}
@@ -3627,11 +3710,11 @@ static int dispc_init_gamma_tables(void)
u32 *gt;
if (channel == OMAP_DSS_CHANNEL_LCD2 &&
- !dss_has_feature(FEAT_MGR_LCD2))
+ !dispc_has_feature(FEAT_MGR_LCD2))
continue;
if (channel == OMAP_DSS_CHANNEL_LCD3 &&
- !dss_has_feature(FEAT_MGR_LCD3))
+ !dispc_has_feature(FEAT_MGR_LCD3))
continue;
gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
@@ -3651,7 +3734,7 @@ static void _omap_dispc_initial_config(void)
u32 l;
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
- if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(DISPC_DIVISOR);
/* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
l = FLD_MOD(l, 1, 0, 0);
@@ -3669,7 +3752,7 @@ static void _omap_dispc_initial_config(void)
* func-clock auto-gating. For newer versions
* (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
*/
- if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
+ if (dispc_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
dispc_setup_color_conv_coef();
@@ -3685,10 +3768,272 @@ static void _omap_dispc_initial_config(void)
if (dispc.feat->mstandby_workaround)
REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
dispc_init_mflag();
}
+static const enum dispc_feature_id omap2_dispc_features_list[] = {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+};
+
+static const enum dispc_feature_id omap3_dispc_features_list[] = {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_LINEBUFFERSPLIT,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FIXED_ZORDER,
+ FEAT_FIFO_MERGE,
+ FEAT_OMAP3_DSI_FIFO_BUG,
+};
+
+static const enum dispc_feature_id am43xx_dispc_features_list[] = {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_LINEBUFFERSPLIT,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FIXED_ZORDER,
+ FEAT_FIFO_MERGE,
+};
+
+static const enum dispc_feature_id omap4_dispc_features_list[] = {
+ FEAT_MGR_LCD2,
+ FEAT_CORE_CLK_DIV,
+ FEAT_HANDLE_UV_SEPARATE,
+ FEAT_ATTR2,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FREE_ZORDER,
+ FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
+};
+
+static const enum dispc_feature_id omap5_dispc_features_list[] = {
+ FEAT_MGR_LCD2,
+ FEAT_MGR_LCD3,
+ FEAT_CORE_CLK_DIV,
+ FEAT_HANDLE_UV_SEPARATE,
+ FEAT_ATTR2,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FREE_ZORDER,
+ FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
+ FEAT_MFLAG,
+};
+
+static const struct dss_reg_field omap2_dispc_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 11, 0 },
+ [FEAT_REG_FIRVINC] = { 27, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
+ [FEAT_REG_FIFOSIZE] = { 8, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+};
+
+static const struct dss_reg_field omap3_dispc_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
+ [FEAT_REG_FIFOSIZE] = { 10, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+};
+
+static const struct dss_reg_field omap4_dispc_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
+ [FEAT_REG_FIFOSIZE] = { 15, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
+ [FEAT_REG_VERTICALACCU] = { 26, 16 },
+};
+
+static const enum omap_overlay_caps omap2_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+static const enum omap_overlay_caps omap3430_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+static const enum omap_overlay_caps omap3630_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+static const enum omap_overlay_caps omap4_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
+ OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO3 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+#define COLOR_ARRAY(arr...) (const u32[]) { arr, 0 }
+
+static const u32 *omap2_dispc_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888),
+
+ /* OMAP_DSS_VIDEO1 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY),
+
+ /* OMAP_DSS_VIDEO2 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY),
+};
+
+static const u32 *omap3_dispc_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_VIDEO1 */
+ COLOR_ARRAY(
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
+ DRM_FORMAT_YUYV, DRM_FORMAT_UYVY),
+
+ /* OMAP_DSS_VIDEO2 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
+};
+
+static const u32 *omap4_dispc_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB1555),
+
+ /* OMAP_DSS_VIDEO1 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_VIDEO2 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_VIDEO3 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_WB */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+};
+
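[editor note] COLOR_ARRAY() above is a C99 compound literal that appends a 0 sentinel, so each per-plane format list carries its own terminator instead of a separate length field; fourcc 0 is never a valid format, which is what dispc_ovl_color_mode_supported() relies on. A standalone model:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COLOR_ARRAY(arr...) (const uint32_t[]) { arr, 0 }

/* fourcc codes for XRGB8888 ("XR24") and RGB565 ("RG16") */
static const uint32_t *gfx_modes =
	COLOR_ARRAY(0x34325258, 0x36314752);

static bool mode_supported(const uint32_t *modes, uint32_t fourcc)
{
	unsigned int i;

	for (i = 0; modes[i]; i++)	/* walk until the 0 sentinel */
		if (modes[i] == fourcc)
			return true;

	return false;
}

int main(void)
{
	printf("%d\n", mode_supported(gfx_modes, 0x36314752)); /* prints 1 */
	return 0;
}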
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
@@ -3701,9 +4046,26 @@ static const struct dispc_features omap24xx_dispc_feats = {
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.max_lcd_pclk = 66500000,
+ .max_downscale = 2,
+ /*
+ * Assume a line buffer width of 768 pixels, as the OMAP2 DISPC scaler
+ * cannot scale an image wider than 768 pixels.
+ */
+ .max_line_width = 768,
+ .min_pcd = 2,
.calc_scaling = dispc_ovl_calc_scaling_24xx,
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
+ .features = omap2_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap2_dispc_features_list),
+ .reg_fields = omap2_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
+ .overlay_caps = omap2_dispc_overlay_caps,
+ .supported_color_modes = omap2_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
@@ -3722,9 +4084,22 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.mgr_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .features = omap3_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap3_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3430_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
@@ -3743,9 +4118,90 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.mgr_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
+ .calc_scaling = dispc_ovl_calc_scaling_34xx,
+ .calc_core_clk = calc_core_clk_34xx,
+ .num_fifos = 3,
+ .features = omap3_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap3_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3430_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
+ .no_framedone_tv = true,
+ .set_max_preload = false,
+ .last_pixel_inc_missing = true,
+};
+
+static const struct dispc_features omap36xx_dispc_feats = {
+ .sw_start = 7,
+ .fp_start = 19,
+ .bp_start = 31,
+ .sw_max = 256,
+ .vp_max = 4095,
+ .hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
+ .max_lcd_pclk = 173000000,
+ .max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .features = omap3_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap3_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3630_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
+ .no_framedone_tv = true,
+ .set_max_preload = false,
+ .last_pixel_inc_missing = true,
+};
+
+static const struct dispc_features am43xx_dispc_feats = {
+ .sw_start = 7,
+ .fp_start = 19,
+ .bp_start = 31,
+ .sw_max = 256,
+ .vp_max = 4095,
+ .hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
+ .max_lcd_pclk = 173000000,
+ .max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
+ .calc_scaling = dispc_ovl_calc_scaling_34xx,
+ .calc_core_clk = calc_core_clk_34xx,
+ .num_fifos = 3,
+ .features = am43xx_dispc_features_list,
+ .num_features = ARRAY_SIZE(am43xx_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3430_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 1,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
@@ -3764,9 +4220,22 @@ static const struct dispc_features omap44xx_dispc_feats = {
.mgr_height_max = 2048,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000,
+ .max_downscale = 4,
+ .max_line_width = 2048,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
+ .features = omap4_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap4_dispc_features_list),
+ .reg_fields = omap4_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap4_dispc_reg_fields),
+ .overlay_caps = omap4_dispc_overlay_caps,
+ .supported_color_modes = omap4_dispc_supported_color_modes,
+ .num_mgrs = 3,
+ .num_ovls = 4,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
.gfx_fifo_workaround = true,
.set_max_preload = true,
.supports_sync_align = true,
@@ -3790,9 +4259,22 @@ static const struct dispc_features omap54xx_dispc_feats = {
.mgr_height_max = 4096,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 186000000,
+ .max_downscale = 4,
+ .max_line_width = 2048,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
+ .features = omap5_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap5_dispc_features_list),
+ .reg_fields = omap4_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap4_dispc_reg_fields),
+ .overlay_caps = omap4_dispc_overlay_caps,
+ .supported_color_modes = omap4_dispc_supported_color_modes,
+ .num_mgrs = 4,
+ .num_ovls = 4,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
.gfx_fifo_workaround = true,
.mstandby_workaround = true,
.set_max_preload = true,
@@ -3804,54 +4286,6 @@ static const struct dispc_features omap54xx_dispc_feats = {
.has_gamma_i734_bug = true,
};
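[editor note] Each dispc_features table above pairs an array pointer with an element count computed via ARRAY_SIZE() at the initializer, so the two cannot drift apart when a list gains or loses entries. The bare idiom, reduced to a standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int omapx_list[] = { 1, 2, 3 };

struct feats {
	const int *features;
	unsigned int num_features;
};

/* count derived from the array itself, never hand-maintained */
static const struct feats omapx_feats = {
	.features = omapx_list,
	.num_features = ARRAY_SIZE(omapx_list),
};

int main(void)
{
	printf("%u\n", omapx_feats.num_features); /* prints 3 */
	return 0;
}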
-static int dispc_init_features(struct platform_device *pdev)
-{
- const struct dispc_features *src;
- struct dispc_features *dst;
-
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate DISPC Features\n");
- return -ENOMEM;
- }
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- src = &omap24xx_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES1:
- src = &omap34xx_rev1_0_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- case OMAPDSS_VER_AM43xx:
- src = &omap34xx_rev3_0_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- src = &omap44xx_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
- src = &omap54xx_dispc_feats;
- break;
-
- default:
- return -ENODEV;
- }
-
- memcpy(dst, src, sizeof(*dst));
- dispc.feat = dst;
-
- return 0;
-}
-
static irqreturn_t dispc_irq_handler(int irq, void *arg)
{
if (!dispc.is_enabled)
@@ -4083,9 +4517,28 @@ static const struct dispc_ops dispc_ops = {
};
/* DISPC HW IP initialisation */
+static const struct of_device_id dispc_of_match[] = {
+ { .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
+ { .compatible = "ti,omap3-dispc", .data = &omap36xx_dispc_feats },
+ { .compatible = "ti,omap4-dispc", .data = &omap44xx_dispc_feats },
+ { .compatible = "ti,omap5-dispc", .data = &omap54xx_dispc_feats },
+ { .compatible = "ti,dra7-dispc", .data = &omap54xx_dispc_feats },
+ {},
+};
+
+static const struct soc_device_attribute dispc_soc_devices[] = {
+ { .machine = "OMAP3[45]*",
+ .revision = "ES[12].?", .data = &omap34xx_rev1_0_dispc_feats },
+ { .machine = "OMAP3[45]*", .data = &omap34xx_rev3_0_dispc_feats },
+ { .machine = "AM35*", .data = &omap34xx_rev3_0_dispc_feats },
+ { .machine = "AM43*", .data = &am43xx_dispc_feats },
+ { /* sentinel */ }
+};
+
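[editor note] The two tables above encode the lookup order that dispc_bind() applies just below: soc_device_match() first, because the OMAP34xx ES1 vs. ES3+ split is only visible in the SoC revision ("ES[12].?" is a glob over the revision string), with the DT compatible's match data as the fallback. A sketch of that order folded into one helper, assuming the surrounding kernel headers; the NULL check on of_match_device() is an added precaution, not part of the patch:

static const struct dispc_features *dispc_lookup_features(struct device *dev)
{
	const struct soc_device_attribute *soc;
	const struct of_device_id *match;

	/* revision-specific entries win over generic compatible data */
	soc = soc_device_match(dispc_soc_devices);
	if (soc)
		return soc->data;

	match = of_match_device(dispc_of_match, dev);
	return match ? match->data : NULL;
}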
static int dispc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct soc_device_attribute *soc;
u32 rev;
int r = 0;
struct resource *dispc_mem;
@@ -4095,9 +4548,15 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
spin_lock_init(&dispc.control_lock);
- r = dispc_init_features(dispc.pdev);
- if (r)
- return r;
+ /*
+ * The OMAP3-based models can't be told apart using the compatible
+ * string, so use SoC device matching.
+ */
+ soc = soc_device_match(dispc_soc_devices);
+ if (soc)
+ dispc.feat = soc->data;
+ else
+ dispc.feat = of_match_device(dispc_of_match, &pdev->dev)->data;
r = dispc_errata_i734_wa_init();
if (r)
@@ -4226,15 +4685,6 @@ static const struct dev_pm_ops dispc_pm_ops = {
.runtime_resume = dispc_runtime_resume,
};
-static const struct of_device_id dispc_of_match[] = {
- { .compatible = "ti,omap2-dispc", },
- { .compatible = "ti,omap3-dispc", },
- { .compatible = "ti,omap4-dispc", },
- { .compatible = "ti,omap5-dispc", },
- { .compatible = "ti,dra7-dispc", },
- {},
-};
-
static struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
.remove = dispc_remove,
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 86dbb65a6c28..daf286fc8a40 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -32,13 +32,14 @@
#include <linux/string.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
struct dpi_data {
struct platform_device *pdev;
+ enum dss_model dss_model;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
@@ -99,25 +100,21 @@ static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
return DSS_CLK_SRC_FCK;
}
-static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
{
+ enum omap_channel channel = dpi->output.dispc_channel;
+
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
* would also be used for DISPC fclk. Meaning, when the DPI output is
* disabled, DISPC clock will be disabled, and TV out will stop.
*/
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- case OMAPDSS_VER_AM43xx:
+ switch (dpi->dss_model) {
+ case DSS_MODEL_OMAP2:
+ case DSS_MODEL_OMAP3:
return DSS_CLK_SRC_FCK;
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
+ case DSS_MODEL_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return DSS_CLK_SRC_PLL1_1;
@@ -127,7 +124,7 @@ static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
return DSS_CLK_SRC_FCK;
}
- case OMAPDSS_VER_OMAP5:
+ case DSS_MODEL_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return DSS_CLK_SRC_PLL1_1;
@@ -138,7 +135,7 @@ static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
return DSS_CLK_SRC_FCK;
}
- case OMAPDSS_VER_DRA7xx:
+ case DSS_MODEL_DRA7:
return dpi_get_clk_src_dra7xx(channel);
default:
@@ -213,7 +210,7 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
- ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ ctx->pck_min, dss_get_max_fck_rate(),
dpi_calc_hsdiv_cb, ctx);
}
@@ -403,19 +400,13 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dpi->lock);
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi->vdds_dsi_reg) {
- DSSERR("no VDSS_DSI regulator\n");
- r = -ENODEV;
- goto err_no_reg;
- }
-
if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err_no_out_mgr;
}
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
+ if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
goto err_reg_enable;
@@ -459,11 +450,10 @@ err_pll_init:
err_src_sel:
dispc_runtime_put();
err_get_dispc:
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
err_reg_enable:
err_no_out_mgr:
-err_no_reg:
mutex_unlock(&dpi->lock);
return r;
}
@@ -484,7 +474,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
dispc_runtime_put();
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
mutex_unlock(&dpi->lock);
@@ -575,11 +565,21 @@ static int dpi_verify_pll(struct dss_pll *pll)
return 0;
}
+static const struct soc_device_attribute dpi_soc_devices[] = {
+ { .family = "OMAP3[456]*" },
+ { .family = "[AD]M37*" },
+ { /* sentinel */ }
+};
+
static int dpi_init_regulator(struct dpi_data *dpi)
{
struct regulator *vdds_dsi;
- if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ /*
+ * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
+ * DM37xx only.
+ */
+ if (!soc_device_match(dpi_soc_devices))
return 0;
if (dpi->vdds_dsi_reg)
@@ -604,7 +604,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
if (dpi->pll)
return;
- dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
+ dpi->clk_src = dpi_get_clk_src(dpi);
pll = dss_pll_find_by_src(dpi->clk_src);
if (!pll)
@@ -624,18 +624,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dpi_get_channel(int port_num)
+static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
{
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- case OMAPDSS_VER_AM43xx:
+ switch (dpi->dss_model) {
+ case DSS_MODEL_OMAP2:
+ case DSS_MODEL_OMAP3:
return OMAP_DSS_CHANNEL_LCD;
- case OMAPDSS_VER_DRA7xx:
+ case DSS_MODEL_DRA7:
switch (port_num) {
case 2:
return OMAP_DSS_CHANNEL_LCD3;
@@ -646,12 +642,10 @@ static enum omap_channel dpi_get_channel(int port_num)
return OMAP_DSS_CHANNEL_LCD;
}
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
+ case DSS_MODEL_OMAP4:
return OMAP_DSS_CHANNEL_LCD2;
- case OMAPDSS_VER_OMAP5:
+ case DSS_MODEL_OMAP5:
return OMAP_DSS_CHANNEL_LCD3;
default:
@@ -716,10 +710,8 @@ static const struct omapdss_dpi_ops dpi_ops = {
.get_timings = dpi_get_timings,
};
-static void dpi_init_output_port(struct platform_device *pdev,
- struct device_node *port)
+static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
- struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
int r;
u32 port_num;
@@ -741,10 +733,10 @@ static void dpi_init_output_port(struct platform_device *pdev,
break;
}
- out->dev = &pdev->dev;
+ out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->output_type = OMAP_DISPLAY_TYPE_DPI;
- out->dispc_channel = dpi_get_channel(port_num);
+ out->dispc_channel = dpi_get_channel(dpi, port_num);
out->port_num = port_num;
out->ops.dpi = &dpi_ops;
out->owner = THIS_MODULE;
@@ -760,7 +752,8 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_unregister_output(out);
}
-int dpi_init_port(struct platform_device *pdev, struct device_node *port)
+int dpi_init_port(struct platform_device *pdev, struct device_node *port,
+ enum dss_model dss_model)
{
struct dpi_data *dpi;
struct device_node *ep;
@@ -786,11 +779,12 @@ int dpi_init_port(struct platform_device *pdev, struct device_node *port)
of_node_put(ep);
dpi->pdev = pdev;
+ dpi->dss_model = dss_model;
port->data = dpi;
mutex_init(&dpi->lock);
- dpi_init_output_port(pdev, port);
+ dpi_init_output_port(dpi, port);
dpi->port_initialized = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 835f49004bc3..b56a05730314 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -20,6 +20,8 @@
#define DSS_SUBSYS_NAME "DSI"
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -42,12 +44,12 @@
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include <video/mipi_display.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
#define DSI_CATCH_MISSING_TE
@@ -228,6 +230,12 @@ static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
#define DSI_MAX_NR_ISRS 2
#define DSI_MAX_NR_LANES 5
+enum dsi_model {
+ DSI_MODEL_OMAP3,
+ DSI_MODEL_OMAP4,
+ DSI_MODEL_OMAP5,
+};
+
enum dsi_lane_function {
DSI_LANE_UNUSED = 0,
DSI_LANE_CLK,
@@ -299,12 +307,36 @@ struct dsi_lp_clock_info {
u16 lp_clk_div;
};
+struct dsi_module_id_data {
+ u32 address;
+ int id;
+};
+
+enum dsi_quirks {
+ DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
+ DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
+ DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
+ DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
+ DSI_QUIRK_GNQ = (1 << 4),
+ DSI_QUIRK_PHY_DCC = (1 << 5),
+};
+
+struct dsi_of_data {
+ enum dsi_model model;
+ const struct dss_pll_hw *pll_hw;
+ const struct dsi_module_id_data *modules;
+ unsigned int max_fck_freq;
+ unsigned int max_pll_lpdiv;
+ enum dsi_quirks quirks;
+};
+
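[editor note] The dsi_quirks enum above packs one erratum per bit, so a single field in dsi_of_data can describe any combination and call sites test membership with a bitwise AND. The bare pattern, with illustrative names:

#include <stdio.h>

enum demo_quirks {
	QUIRK_FOO = (1 << 0),
	QUIRK_BAR = (1 << 1),
};

struct demo_of_data {
	enum demo_quirks quirks;
};

/* the per-SoC descriptor ORs together the errata that apply */
static const struct demo_of_data demo_soc_data = {
	.quirks = QUIRK_FOO | QUIRK_BAR,
};

static int demo_needs_foo(const struct demo_of_data *d)
{
	return (d->quirks & QUIRK_FOO) != 0;
}

int main(void)
{
	printf("%d\n", demo_needs_foo(&demo_soc_data)); /* prints 1 */
	return 0;
}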
struct dsi_data {
struct platform_device *pdev;
void __iomem *proto_base;
void __iomem *phy_base;
void __iomem *pll_base;
+ const struct dsi_of_data *data;
int module_id;
int irq;
@@ -312,6 +344,7 @@ struct dsi_data {
bool is_enabled;
struct clk *dss_clk;
+ struct regmap *syscon;
struct dispc_clock_info user_dispc_cinfo;
struct dss_pll_clock_info user_dsi_cinfo;
@@ -397,13 +430,6 @@ struct dsi_packet_sent_handler_data {
struct completion *completion;
};
-struct dsi_module_id_data {
- u32 address;
- int id;
-};
-
-static const struct of_device_id dsi_of_match[];
-
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
@@ -1186,6 +1212,7 @@ static int dsi_regulator_init(struct platform_device *dsidev)
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 l;
int b0, b1, b2;
@@ -1194,7 +1221,7 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
* I/O. */
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
- if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
+ if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
b0 = 28;
b1 = 27;
b2 = 26;
@@ -1297,7 +1324,7 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
unsigned long dsi_fclk;
unsigned lp_clk_div;
unsigned long lp_clk;
- unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
+ unsigned lpdiv_max = dsi->data->max_pll_lpdiv;
lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
@@ -1349,11 +1376,12 @@ enum dsi_pll_power_state {
static int dsi_pll_power(struct platform_device *dsidev,
enum dsi_pll_power_state state)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t = 0;
/* DSI-PLL power command 0x3 is not working */
- if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
- state == DSI_PLL_POWER_ON_DIV)
+ if ((dsi->data->quirks & DSI_QUIRK_PLL_PWR_BUG) &&
+ state == DSI_PLL_POWER_ON_DIV)
state = DSI_PLL_POWER_ON_ALL;
/* PLL_PWR_CMD */
@@ -1373,11 +1401,12 @@ static int dsi_pll_power(struct platform_device *dsidev,
}
-static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo)
+static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
+ struct dss_pll_clock_info *cinfo)
{
unsigned long max_dsi_fck;
- max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
+ max_dsi_fck = dsi->data->max_fck_freq;
cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
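[editor note] The divider computation above uses DIV_ROUND_UP to pick the smallest integer divider that keeps the HSDIV_DSI output at or below the per-SoC cap. Worked example with illustrative numbers:

	clkdco = 1 000 000 000 Hz, max_dsi_fck = 186 000 000 Hz
	mX[HSDIV_DSI] = DIV_ROUND_UP(1000000000, 186000000) = 6
	clkout        = 1000000000 / 6 ~= 166.7 MHz   (within the cap)

Plain truncating division would give 5, and 1 GHz / 5 = 200 MHz would exceed the cap.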
@@ -1773,13 +1802,14 @@ static int dsi_cio_power(struct platform_device *dsidev,
static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int val;
/* line buffer on OMAP3 is 1024 x 24bits */
/* XXX: for some reason using full buffer size causes
* considerable TX slowdown with update sizes that fill the
* whole buffer */
- if (!dss_has_feature(FEAT_DSI_GNQ))
+ if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
return 1023 * 3;
val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
@@ -1872,6 +1902,7 @@ static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
static void dsi_cio_timings(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
@@ -1934,7 +1965,7 @@ static void dsi_cio_timings(struct platform_device *dsidev)
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
- if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
+ if (dsi->data->quirks & DSI_QUIRK_PHY_DCC) {
r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */
r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */
r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
@@ -2006,7 +2037,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
const u8 *offsets;
- if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
+ if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC)
offsets = offsets_old;
else
offsets = offsets_new;
@@ -2060,6 +2091,83 @@ static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
return mask;
}
+/* OMAP4 CONTROL_DSIPHY */
+#define OMAP4_DSIPHY_SYSCON_OFFSET 0x78
+
+#define OMAP4_DSI2_LANEENABLE_SHIFT 29
+#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
+#define OMAP4_DSI1_LANEENABLE_SHIFT 24
+#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
+#define OMAP4_DSI1_PIPD_SHIFT 19
+#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
+#define OMAP4_DSI2_PIPD_SHIFT 14
+#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
+
+static int dsi_omap4_mux_pads(struct dsi_data *dsi, unsigned int lanes)
+{
+ u32 enable_mask, enable_shift;
+ u32 pipd_mask, pipd_shift;
+
+ if (dsi->module_id == 0) {
+ enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI1_PIPD_MASK;
+ pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+ } else if (dsi->module_id == 1) {
+ enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI2_PIPD_MASK;
+ pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+ } else {
+ return -ENODEV;
+ }
+
+ return regmap_update_bits(dsi->syscon, OMAP4_DSIPHY_SYSCON_OFFSET,
+ enable_mask | pipd_mask,
+ (lanes << enable_shift) | (lanes << pipd_shift));
+}
+
+/* OMAP5 CONTROL_DSIPHY */
+
+#define OMAP5_DSIPHY_SYSCON_OFFSET 0x74
+
+#define OMAP5_DSI1_LANEENABLE_SHIFT 24
+#define OMAP5_DSI2_LANEENABLE_SHIFT 19
+#define OMAP5_DSI_LANEENABLE_MASK 0x1f
+
+static int dsi_omap5_mux_pads(struct dsi_data *dsi, unsigned int lanes)
+{
+ u32 enable_shift;
+
+ if (dsi->module_id == 0)
+ enable_shift = OMAP5_DSI1_LANEENABLE_SHIFT;
+ else if (dsi->module_id == 1)
+ enable_shift = OMAP5_DSI2_LANEENABLE_SHIFT;
+ else
+ return -ENODEV;
+
+ return regmap_update_bits(dsi->syscon, OMAP5_DSIPHY_SYSCON_OFFSET,
+ OMAP5_DSI_LANEENABLE_MASK << enable_shift,
+ lanes << enable_shift);
+}
+
+static int dsi_enable_pads(struct dsi_data *dsi, unsigned int lane_mask)
+{
+ if (dsi->data->model == DSI_MODEL_OMAP4)
+ return dsi_omap4_mux_pads(dsi, lane_mask);
+ if (dsi->data->model == DSI_MODEL_OMAP5)
+ return dsi_omap5_mux_pads(dsi, lane_mask);
+ return 0;
+}
+
+static void dsi_disable_pads(struct dsi_data *dsi)
+{
+ if (dsi->data->model == DSI_MODEL_OMAP4)
+ dsi_omap4_mux_pads(dsi, 0);
+ else if (dsi->data->model == DSI_MODEL_OMAP5)
+ dsi_omap5_mux_pads(dsi, 0);
+}
+
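A quick numeric check of dsi_omap4_mux_pads() above, as a standalone sketch reusing the patch's mask and shift values: for module 0 with all five lanes requested, the update writes 0x1f into both the LANEENABLE and PIPD fields of CONTROL_DSIPHY.

#include <stdint.h>
#include <stdio.h>

#define DSI1_LANEENABLE_SHIFT	24
#define DSI1_LANEENABLE_MASK	(0x1f << 24)
#define DSI1_PIPD_SHIFT		19
#define DSI1_PIPD_MASK		(0x1f << 19)

int main(void)
{
	uint32_t lanes = 0x1f;	/* clock lane plus four data lanes */
	uint32_t mask = DSI1_LANEENABLE_MASK | DSI1_PIPD_MASK;
	uint32_t val = (lanes << DSI1_LANEENABLE_SHIFT) |
		       (lanes << DSI1_PIPD_SHIFT);

	/* regmap_update_bits() then performs reg = (reg & ~mask) | (val & mask) */
	printf("mask=0x%08x val=0x%08x\n", mask, val);	/* both 0x1ff80000 */
	return 0;
}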
static int dsi_cio_init(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -2068,7 +2176,7 @@ static int dsi_cio_init(struct platform_device *dsidev)
DSSDBG("DSI CIO init starts");
- r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
+ r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsidev));
if (r)
return r;
@@ -2178,7 +2286,7 @@ err_cio_pwr:
dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
dsi_disable_scp_clk(dsidev);
- dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
+ dsi_disable_pads(dsi);
return r;
}
@@ -2191,7 +2299,7 @@ static void dsi_cio_uninit(struct platform_device *dsidev)
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsidev);
- dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
+ dsi_disable_pads(dsi);
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2439,7 +2547,7 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
- if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
+ if (dsi->data->quirks & DSI_QUIRK_VC_OCP_WIDTH)
r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
@@ -2474,7 +2582,7 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
/* DCS_CMD_ENABLE */
- if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
+ if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
bool enable = source == DSI_VC_SOURCE_VP;
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
}
@@ -3607,7 +3715,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
- if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
+ if (!(dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC)) {
r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
/* DCS_CMD_CODE, 1=start, 0=continue */
r = FLD_MOD(r, 0, 25, 25);
@@ -4450,6 +4558,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4457,7 +4566,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
- dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ dsi->data->max_fck_freq,
dsi_cm_calc_hsdiv_cb, ctx);
}
@@ -4749,6 +4858,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4756,7 +4866,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
- dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ dsi->data->max_fck_freq,
dsi_vm_calc_hsdiv_cb, ctx);
}
@@ -4827,7 +4937,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
goto err;
}
- dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
+ dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo);
r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
@@ -4857,24 +4967,14 @@ err:
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dsi_get_channel(int module_id)
+static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
{
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- case OMAPDSS_VER_AM43xx:
- DSSWARN("DSI not supported\n");
+ switch (dsi->data->model) {
+ case DSI_MODEL_OMAP3:
return OMAP_DSS_CHANNEL_LCD;
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- return OMAP_DSS_CHANNEL_LCD;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- switch (module_id) {
+ case DSI_MODEL_OMAP4:
+ switch (dsi->module_id) {
case 0:
return OMAP_DSS_CHANNEL_LCD;
case 1:
@@ -4884,8 +4984,8 @@ static enum omap_channel dsi_get_channel(int module_id)
return OMAP_DSS_CHANNEL_LCD;
}
- case OMAPDSS_VER_OMAP5:
- switch (module_id) {
+ case DSI_MODEL_OMAP5:
+ switch (dsi->module_id) {
case 0:
return OMAP_DSS_CHANNEL_LCD;
case 1:
@@ -5065,7 +5165,7 @@ static void dsi_init_output(struct platform_device *dsidev)
out->output_type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi->module_id);
+ out->dispc_channel = dsi_get_channel(dsi);
out->ops.dsi = &dsi_ops;
out->owner = THIS_MODULE;
@@ -5240,29 +5340,7 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
pll->clkin = clk;
pll->base = dsi->pll_base;
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- pll->hw = &dss_omap3_dsi_pll_hw;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- pll->hw = &dss_omap4_dsi_pll_hw;
- break;
-
- case OMAPDSS_VER_OMAP5:
- pll->hw = &dss_omap5_dsi_pll_hw;
- break;
-
- default:
- return -ENODEV;
- }
-
+ pll->hw = dsi->data->pll_hw;
pll->ops = &dsi_pll_ops;
r = dss_pll_register(pll);
@@ -5273,9 +5351,74 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
}
/* DSI1 HW IP initialisation */
+static const struct dsi_of_data dsi_of_data_omap34xx = {
+ .model = DSI_MODEL_OMAP3,
+ .pll_hw = &dss_omap3_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x4804fc00, .id = 0, },
+ { },
+ },
+ .max_fck_freq = 173000000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_REVERSE_TXCLKESC,
+};
+
+static const struct dsi_of_data dsi_of_data_omap36xx = {
+ .model = DSI_MODEL_OMAP3,
+ .pll_hw = &dss_omap3_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x4804fc00, .id = 0, },
+ { },
+ },
+ .max_fck_freq = 173000000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_PLL_PWR_BUG,
+};
+
+static const struct dsi_of_data dsi_of_data_omap4 = {
+ .model = DSI_MODEL_OMAP4,
+ .pll_hw = &dss_omap4_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x58004000, .id = 0, },
+ { .address = 0x58005000, .id = 1, },
+ { },
+ },
+ .max_fck_freq = 170000000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
+ | DSI_QUIRK_GNQ,
+};
+
+static const struct dsi_of_data dsi_of_data_omap5 = {
+ .model = DSI_MODEL_OMAP5,
+ .pll_hw = &dss_omap5_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x58004000, .id = 0, },
+ { .address = 0x58009000, .id = 1, },
+ { },
+ },
+ .max_fck_freq = 209250000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
+ | DSI_QUIRK_GNQ | DSI_QUIRK_PHY_DCC,
+};
+
+static const struct of_device_id dsi_of_match[] = {
+ { .compatible = "ti,omap3-dsi", .data = &dsi_of_data_omap36xx, },
+ { .compatible = "ti,omap4-dsi", .data = &dsi_of_data_omap4, },
+ { .compatible = "ti,omap5-dsi", .data = &dsi_of_data_omap5, },
+ {},
+};
+
+static const struct soc_device_attribute dsi_soc_devices[] = {
+ { .machine = "OMAP3[45]*", .data = &dsi_of_data_omap34xx },
+ { .machine = "AM35*", .data = &dsi_of_data_omap34xx },
+ { /* sentinel */ }
+};
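The per-SoC tables above carry everything the driver previously fetched through dss_features: the PLL variant, the module address/ID map that the of_device_id data used to hold, the clock limits, and a quirks bitmask. Each dss_has_feature() call in the old code becomes a constant-time AND against dsi->data->quirks. The shape of that pattern, as a standalone sketch (the bit values here are illustrative; the real DSI_QUIRK_* definitions sit earlier in dsi.c, outside this hunk):

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_QUIRK_PLL_PWR_BUG	(1 << 0)	/* illustrative bits */
#define SKETCH_QUIRK_GNQ		(1 << 1)
#define SKETCH_QUIRK_PHY_DCC		(1 << 2)

struct sketch_of_data {
	unsigned int quirks;
};

static bool sketch_has_quirk(const struct sketch_of_data *d, unsigned int q)
{
	return d->quirks & q;	/* one AND replaces a feature-list scan */
}

int main(void)
{
	const struct sketch_of_data omap5 = {
		.quirks = SKETCH_QUIRK_GNQ | SKETCH_QUIRK_PHY_DCC,
	};

	printf("GNQ=%d PLL_PWR_BUG=%d\n",
	       sketch_has_quirk(&omap5, SKETCH_QUIRK_GNQ),
	       sketch_has_quirk(&omap5, SKETCH_QUIRK_PLL_PWR_BUG));
	return 0;
}

The dsi_soc_devices table exists because OMAP34xx and OMAP36xx share the "ti,omap3-dsi" compatible string; soc_device_match() distinguishes them at runtime, and the DT match is only the fallback, as dsi_bind() below shows.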
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *dsidev = to_platform_device(dev);
+ const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
u32 rev;
int r, i;
@@ -5339,7 +5482,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return r;
}
- d = of_match_node(dsi_of_match, dsidev->dev.of_node)->data;
+ soc = soc_device_match(dsi_soc_devices);
+ if (soc)
+ dsi->data = soc->data;
+ else
+ dsi->data = of_match_node(dsi_of_match, dev->of_node)->data;
+
+ d = dsi->data->modules;
while (d->address != 0 && d->address != dsi_mem->start)
d++;
@@ -5350,6 +5499,24 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->module_id = d->id;
+ if (dsi->data->model == DSI_MODEL_OMAP4 ||
+ dsi->data->model == DSI_MODEL_OMAP5) {
+ struct device_node *np;
+
+ /*
+ * The OMAP4/5 display DT bindings don't reference the padconf
+ * syscon. Our only option to retrieve it is to find it by name.
+ */
+ np = of_find_node_by_name(NULL,
+ dsi->data->model == DSI_MODEL_OMAP4 ?
+ "omap4_padconf_global" : "omap5_padconf_global");
+ if (!np)
+ return -ENODEV;
+
+ dsi->syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
+ }
+
/* DSI VCs initialization */
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
dsi->vc[i].source = DSI_VC_SOURCE_L4;
@@ -5375,7 +5542,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data lanes to 3 by default */
- if (dss_has_feature(FEAT_DSI_GNQ))
+ if (dsi->data->quirks & DSI_QUIRK_GNQ)
/* NB_DATA_LANES */
dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
else
@@ -5495,30 +5662,6 @@ static const struct dev_pm_ops dsi_pm_ops = {
.runtime_resume = dsi_runtime_resume,
};
-static const struct dsi_module_id_data dsi_of_data_omap3[] = {
- { .address = 0x4804fc00, .id = 0, },
- { },
-};
-
-static const struct dsi_module_id_data dsi_of_data_omap4[] = {
- { .address = 0x58004000, .id = 0, },
- { .address = 0x58005000, .id = 1, },
- { },
-};
-
-static const struct dsi_module_id_data dsi_of_data_omap5[] = {
- { .address = 0x58004000, .id = 0, },
- { .address = 0x58009000, .id = 1, },
- { },
-};
-
-static const struct of_device_id dsi_of_match[] = {
- { .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, },
- { .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, },
- { .compatible = "ti,omap5-dsi", .data = dsi_of_data_omap5, },
- {},
-};
-
static struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
.remove = dsi_remove,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 99e22ca972c7..d1755f12236b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -22,6 +22,7 @@
#define DSS_SUBSYS_NAME "DSS"
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -38,14 +39,15 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
#define DSS_SZ_REGS SZ_512
@@ -69,15 +71,24 @@ struct dss_reg {
#define REG_FLD_MOD(idx, val, start, end) \
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+struct dss_ops {
+ int (*dpi_select_source)(int port, enum omap_channel channel);
+ int (*select_lcd_source)(enum omap_channel channel,
+ enum dss_clk_source clk_src);
+};
+
struct dss_features {
+ enum dss_model model;
u8 fck_div_max;
+ unsigned int fck_freq_max;
u8 dss_fck_multiplier;
const char *parent_clk_name;
const enum omap_display_type *ports;
int num_ports;
- int (*dpi_select_source)(int port, enum omap_channel channel);
- int (*select_lcd_source)(enum omap_channel channel,
- enum dss_clk_source clk_src);
+ const enum omap_dss_output_id *outputs;
+ const struct dss_ops *ops;
+ struct dss_reg_field dispc_clk_switch;
+ bool has_lcd_clk_src;
};
static struct {
@@ -139,8 +150,7 @@ static void dss_save_context(void)
SR(CONTROL);
- if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
- OMAP_DISPLAY_TYPE_SDI) {
+ if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
@@ -159,8 +169,7 @@ static void dss_restore_context(void)
RR(CONTROL);
- if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
- OMAP_DISPLAY_TYPE_SDI) {
+ if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
@@ -390,8 +399,7 @@ static void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_SYSSTATUS);
DUMPREG(DSS_CONTROL);
- if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
- OMAP_DISPLAY_TYPE_SDI) {
+ if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
DUMPREG(DSS_SDI_CONTROL);
DUMPREG(DSS_PLL_CONTROL);
DUMPREG(DSS_SDI_STATUS);
@@ -419,14 +427,12 @@ static int dss_get_channel_index(enum omap_channel channel)
static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
int b;
- u8 start, end;
/*
* We always use PRCM clock as the DISPC func clock, except on DSS3,
* where we don't have separate DISPC and LCD clock sources.
*/
- if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
- clk_src != DSS_CLK_SRC_FCK))
+ if (WARN_ON(dss.feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
return;
switch (clk_src) {
@@ -444,9 +450,9 @@ static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
return;
}
- dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
-
- REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
+ dss.feat->dispc_clk_switch.start,
+ dss.feat->dispc_clk_switch.end);
dss.dispc_clk_source = clk_src;
}
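With the bit positions now carried in dss.feat->dispc_clk_switch, the update is plain FLD_MOD() arithmetic. A standalone sketch using the omapdss field helpers (FLD_MOD matches the dss.h definition shown later in this diff; FLD_MASK and FLD_VAL are written out here under the usual omapdss convention that start is the high bit):

#include <stdint.h>
#include <stdio.h>

#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	uint32_t dss_control = 0xffffffff;	/* pretend register content */

	/* OMAP4: dispc_clk_switch = { .start = 9, .end = 8 }, write 0b10 */
	dss_control = FLD_MOD(dss_control, 2, 9, 8);

	printf("0x%08x\n", dss_control);	/* 0xfffffeff: bits 9:8 = 0b10 */
	return 0;
}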
@@ -570,13 +576,13 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
int idx = dss_get_channel_index(channel);
int r;
- if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
+ if (!dss.feat->has_lcd_clk_src) {
dss_select_dispc_clk_source(clk_src);
dss.lcd_clk_source[idx] = clk_src;
return;
}
- r = dss.feat->select_lcd_source(channel, clk_src);
+ r = dss.feat->ops->select_lcd_source(channel, clk_src);
if (r)
return;
@@ -595,7 +601,7 @@ enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
{
- if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
+ if (dss.feat->has_lcd_clk_src) {
int idx = dss_get_channel_index(channel);
return dss.lcd_clk_source[idx];
} else {
@@ -615,7 +621,7 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
unsigned long prate;
unsigned m;
- fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ fck_hw_max = dss.feat->fck_freq_max;
if (dss.parent_clk == NULL) {
unsigned pckd;
@@ -673,6 +679,16 @@ unsigned long dss_get_dispc_clk_rate(void)
return dss.dss_clk_rate;
}
+unsigned long dss_get_max_fck_rate(void)
+{
+ return dss.feat->fck_freq_max;
+}
+
+enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel)
+{
+ return dss.feat->outputs[channel];
+}
+
static int dss_setup_default_clock(void)
{
unsigned long max_dss_fck, prate;
@@ -680,7 +696,7 @@ static int dss_setup_default_clock(void)
unsigned fck_div;
int r;
- max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ max_dss_fck = dss.feat->fck_freq_max;
if (dss.parent_clk == NULL) {
fck = clk_round_rate(dss.dss_clk, max_dss_fck);
@@ -721,27 +737,29 @@ void dss_set_dac_pwrdn_bgz(bool enable)
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
{
- enum omap_display_type dp;
- dp = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
+ enum omap_dss_output_id outputs;
+
+ outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
/* Complain about invalid selections */
- WARN_ON((src == DSS_VENC_TV_CLK) && !(dp & OMAP_DISPLAY_TYPE_VENC));
- WARN_ON((src == DSS_HDMI_M_PCLK) && !(dp & OMAP_DISPLAY_TYPE_HDMI));
+ WARN_ON((src == DSS_VENC_TV_CLK) && !(outputs & OMAP_DSS_OUTPUT_VENC));
+ WARN_ON((src == DSS_HDMI_M_PCLK) && !(outputs & OMAP_DSS_OUTPUT_HDMI));
/* Select only if we have options */
- if ((dp & OMAP_DISPLAY_TYPE_VENC) && (dp & OMAP_DISPLAY_TYPE_HDMI))
+ if ((outputs & OMAP_DSS_OUTPUT_VENC) &&
+ (outputs & OMAP_DSS_OUTPUT_HDMI))
REG_FLD_MOD(DSS_CONTROL, src, 15, 15); /* VENC_HDMI_SWITCH */
}
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
{
- enum omap_display_type displays;
+ enum omap_dss_output_id outputs;
- displays = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
- if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0)
+ outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
+ if ((outputs & OMAP_DSS_OUTPUT_HDMI) == 0)
return DSS_VENC_TV_CLK;
- if ((displays & OMAP_DISPLAY_TYPE_VENC) == 0)
+ if ((outputs & OMAP_DSS_OUTPUT_VENC) == 0)
return DSS_HDMI_M_PCLK;
return REG_GET(DSS_CONTROL, 15, 15);
@@ -823,7 +841,7 @@ static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
int dss_dpi_select_source(int port, enum omap_channel channel)
{
- return dss.feat->dpi_select_source(port, channel);
+ return dss.feat->ops->dpi_select_source(port, channel);
}
static int dss_get_clocks(void)
@@ -882,7 +900,7 @@ void dss_runtime_put(void)
/* DEBUGFS */
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-void dss_debug_dump_clocks(struct seq_file *s)
+static void dss_debug_dump_clocks(struct seq_file *s)
{
dss_dump_clocks(s);
dispc_dump_clocks(s);
@@ -890,8 +908,88 @@ void dss_debug_dump_clocks(struct seq_file *s)
dsi_dump_clocks(s);
#endif
}
-#endif
+static int dss_debug_show(struct seq_file *s, void *unused)
+{
+ void (*func)(struct seq_file *) = s->private;
+
+ func(s);
+ return 0;
+}
+
+static int dss_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dss_debug_show, inode->i_private);
+}
+
+static const struct file_operations dss_debug_fops = {
+ .open = dss_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dss_debugfs_dir;
+
+static int dss_initialize_debugfs(void)
+{
+ dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
+ if (IS_ERR(dss_debugfs_dir)) {
+ int err = PTR_ERR(dss_debugfs_dir);
+
+ dss_debugfs_dir = NULL;
+ return err;
+ }
+
+ debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
+ &dss_debug_dump_clocks, &dss_debug_fops);
+
+ return 0;
+}
+
+static void dss_uninitialize_debugfs(void)
+{
+ if (dss_debugfs_dir)
+ debugfs_remove_recursive(dss_debugfs_dir);
+}
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+ struct dentry *d;
+
+ d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+ write, &dss_debug_fops);
+
+ return PTR_ERR_OR_ZERO(d);
+}
+#else /* CONFIG_OMAP2_DSS_DEBUGFS */
+static inline int dss_initialize_debugfs(void)
+{
+ return 0;
+}
+static inline void dss_uninitialize_debugfs(void)
+{
+}
+#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
+
+static const struct dss_ops dss_ops_omap2_omap3 = {
+ .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+};
+
+static const struct dss_ops dss_ops_omap4 = {
+ .dpi_select_source = &dss_dpi_select_source_omap4,
+ .select_lcd_source = &dss_lcd_clk_mux_omap4,
+};
+
+static const struct dss_ops dss_ops_omap5 = {
+ .dpi_select_source = &dss_dpi_select_source_omap5,
+ .select_lcd_source = &dss_lcd_clk_mux_omap5,
+};
+
+static const struct dss_ops dss_ops_dra7 = {
+ .dpi_select_source = &dss_dpi_select_source_dra7xx,
+ .select_lcd_source = &dss_lcd_clk_mux_dra7,
+};
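Moving the two function pointers out of struct dss_features into a shared struct dss_ops keeps the per-SoC feature entries as plain data and lets several SoCs point at one ops table (omap2 and omap3 already share dss_ops_omap2_omap3 above). Call sites pay one extra dereference, as dss_dpi_select_source() now shows. The pattern in miniature, as a standalone sketch with made-up names:

#include <stdio.h>

struct sketch_ops {
	int (*select_source)(int port);
};

static int sketch_select_v1(int port)
{
	return port;	/* stand-in for the real mux programming */
}

static const struct sketch_ops sketch_ops_v1 = {
	.select_source = sketch_select_v1,
};

struct sketch_features {
	int fck_div_max;		/* plain data stays const and shareable */
	const struct sketch_ops *ops;
};

static const struct sketch_features feats_a = { 16, &sketch_ops_v1 };
static const struct sketch_features feats_b = { 32, &sketch_ops_v1 };

int main(void)
{
	return feats_b.ops->select_source(0);	/* one extra dereference */
}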
static const enum omap_display_type omap2plus_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
@@ -908,130 +1006,168 @@ static const enum omap_display_type dra7xx_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
};
+static const enum omap_dss_output_id omap2_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC,
+};
+
+static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC,
+};
+
+static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC,
+};
+
+static const enum omap_dss_output_id am43xx_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
+};
+
+static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
+
+ /* OMAP_DSS_CHANNEL_LCD2 */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI2,
+};
+
+static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_HDMI,
+
+ /* OMAP_DSS_CHANNEL_LCD2 */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_LCD3 */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI2,
+};
+
static const struct dss_features omap24xx_dss_feats = {
+ .model = DSS_MODEL_OMAP2,
/*
* fck div max is really 16, but the divider range has gaps. The range
* from 1 to 6 has no gaps, so let's use that as a max.
*/
.fck_div_max = 6,
+ .fck_freq_max = 133000000,
.dss_fck_multiplier = 2,
.parent_clk_name = "core_ck",
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .outputs = omap2_dss_supported_outputs,
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = false,
};
static const struct dss_features omap34xx_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
.fck_div_max = 16,
+ .fck_freq_max = 173000000,
.dss_fck_multiplier = 2,
.parent_clk_name = "dpll4_ck",
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap34xx_ports,
+ .outputs = omap3430_dss_supported_outputs,
.num_ports = ARRAY_SIZE(omap34xx_ports),
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = false,
};
static const struct dss_features omap3630_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
.fck_div_max = 32,
+ .fck_freq_max = 173000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .outputs = omap3630_dss_supported_outputs,
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = false,
};
static const struct dss_features omap44xx_dss_feats = {
+ .model = DSS_MODEL_OMAP4,
.fck_div_max = 32,
+ .fck_freq_max = 186000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
- .dpi_select_source = &dss_dpi_select_source_omap4,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
- .select_lcd_source = &dss_lcd_clk_mux_omap4,
+ .outputs = omap4_dss_supported_outputs,
+ .ops = &dss_ops_omap4,
+ .dispc_clk_switch = { 9, 8 },
+ .has_lcd_clk_src = true,
};
static const struct dss_features omap54xx_dss_feats = {
+ .model = DSS_MODEL_OMAP5,
.fck_div_max = 64,
+ .fck_freq_max = 209250000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
- .dpi_select_source = &dss_dpi_select_source_omap5,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
- .select_lcd_source = &dss_lcd_clk_mux_omap5,
+ .outputs = omap5_dss_supported_outputs,
+ .ops = &dss_ops_omap5,
+ .dispc_clk_switch = { 9, 7 },
+ .has_lcd_clk_src = true,
};
static const struct dss_features am43xx_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
.fck_div_max = 0,
+ .fck_freq_max = 200000000,
.dss_fck_multiplier = 0,
.parent_clk_name = NULL,
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .outputs = am43xx_dss_supported_outputs,
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = true,
};
static const struct dss_features dra7xx_dss_feats = {
+ .model = DSS_MODEL_DRA7,
.fck_div_max = 64,
+ .fck_freq_max = 209250000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
- .dpi_select_source = &dss_dpi_select_source_dra7xx,
.ports = dra7xx_ports,
.num_ports = ARRAY_SIZE(dra7xx_ports),
- .select_lcd_source = &dss_lcd_clk_mux_dra7,
+ .outputs = omap5_dss_supported_outputs,
+ .ops = &dss_ops_dra7,
+ .dispc_clk_switch = { 9, 7 },
+ .has_lcd_clk_src = true,
};
-static int dss_init_features(struct platform_device *pdev)
-{
- const struct dss_features *src;
- struct dss_features *dst;
-
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate local DSS Features\n");
- return -ENOMEM;
- }
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- src = &omap24xx_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_AM35xx:
- src = &omap34xx_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP3630:
- src = &omap3630_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- src = &omap44xx_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP5:
- src = &omap54xx_dss_feats;
- break;
-
- case OMAPDSS_VER_AM43xx:
- src = &am43xx_dss_feats;
- break;
-
- case OMAPDSS_VER_DRA7xx:
- src = &dra7xx_dss_feats;
- break;
-
- default:
- return -ENODEV;
- }
-
- memcpy(dst, src, sizeof(*dst));
- dss.feat = dst;
-
- return 0;
-}
-
static int dss_init_ports(struct platform_device *pdev)
{
struct device_node *parent = pdev->dev.of_node;
@@ -1045,7 +1181,7 @@ static int dss_init_ports(struct platform_device *pdev)
switch (dss.feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(pdev, port);
+ dpi_init_port(pdev, port, dss.feat->model);
break;
case OMAP_DISPLAY_TYPE_SDI:
sdi_init_port(pdev, port);
@@ -1144,6 +1280,23 @@ static int dss_video_pll_probe(struct platform_device *pdev)
}
/* DSS HW IP initialisation */
+static const struct of_device_id dss_of_match[] = {
+ { .compatible = "ti,omap2-dss", .data = &omap24xx_dss_feats },
+ { .compatible = "ti,omap3-dss", .data = &omap3630_dss_feats },
+ { .compatible = "ti,omap4-dss", .data = &omap44xx_dss_feats },
+ { .compatible = "ti,omap5-dss", .data = &omap54xx_dss_feats },
+ { .compatible = "ti,dra7-dss", .data = &dra7xx_dss_feats },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dss_of_match);
+
+static const struct soc_device_attribute dss_soc_devices[] = {
+ { .machine = "OMAP3430/3530", .data = &omap34xx_dss_feats },
+ { .machine = "AM35??", .data = &omap34xx_dss_feats },
+ { .family = "AM43xx", .data = &am43xx_dss_feats },
+ { /* sentinel */ }
+};
+
static int dss_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1151,12 +1304,6 @@ static int dss_bind(struct device *dev)
u32 rev;
int r;
- dss.pdev = pdev;
-
- r = dss_init_features(dss.pdev);
- if (r)
- return r;
-
dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
if (IS_ERR(dss.base))
@@ -1288,15 +1435,34 @@ static int dss_add_child_component(struct device *dev, void *data)
static int dss_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *soc;
struct component_match *match = NULL;
int r;
+ dss.pdev = pdev;
+
+ /*
+ * The various OMAP3-based SoCs can't be told apart using the compatible
+	 * string; use SoC device matching instead.
+ */
+ soc = soc_device_match(dss_soc_devices);
+ if (soc)
+ dss.feat = soc->data;
+ else
+ dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
+
+ r = dss_initialize_debugfs();
+ if (r)
+ return r;
+
/* add all the child devices as components */
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
- if (r)
+ if (r) {
+ dss_uninitialize_debugfs();
return r;
+ }
return 0;
}
@@ -1304,9 +1470,27 @@ static int dss_probe(struct platform_device *pdev)
static int dss_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &dss_component_ops);
+
+ dss_uninitialize_debugfs();
+
return 0;
}
+static void dss_shutdown(struct platform_device *pdev)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ DSSDBG("shutdown\n");
+
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ dssdev->driver->disable(dssdev);
+ }
+}
+
static int dss_runtime_suspend(struct device *dev)
{
dss_save_context();
@@ -1343,20 +1527,10 @@ static const struct dev_pm_ops dss_pm_ops = {
.runtime_resume = dss_runtime_resume,
};
-static const struct of_device_id dss_of_match[] = {
- { .compatible = "ti,omap2-dss", },
- { .compatible = "ti,omap3-dss", },
- { .compatible = "ti,omap4-dss", },
- { .compatible = "ti,omap5-dss", },
- { .compatible = "ti,dra7-dss", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dss_of_match);
-
static struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
.remove = dss_remove,
+ .shutdown = dss_shutdown,
.driver = {
.name = "omapdss_dss",
.pm = &dss_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 8dbf35f3ab23..ed465572491e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -27,6 +27,9 @@
#include "omapdss.h"
+#define MAX_DSS_LCD_MANAGERS 3
+#define MAX_NUM_DSI 2
+
#ifdef pr_fmt
#undef pr_fmt
#endif
@@ -72,6 +75,14 @@
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+enum dss_model {
+ DSS_MODEL_OMAP2,
+ DSS_MODEL_OMAP3,
+ DSS_MODEL_OMAP4,
+ DSS_MODEL_OMAP5,
+ DSS_MODEL_DRA7,
+};
+
enum dss_io_pad_mode {
DSS_IO_PAD_MODE_RESET,
DSS_IO_PAD_MODE_RFBI,
@@ -174,6 +185,9 @@ struct dss_pll_hw {
bool has_freqsel;
bool has_selfreqdco;
bool has_refsel;
+
+ /* DRA7 errata i886: use high N & M to avoid jitter */
+ bool errata_i886;
};
struct dss_pll {
@@ -192,6 +206,11 @@ struct dss_pll {
struct dss_pll_clock_info cinfo;
};
+/* Defines a generic omap register field */
+struct dss_reg_field {
+ u8 start, end;
+};
+
struct dispc_clock_info {
/* rates that we get with dividers below */
unsigned long lck;
@@ -219,10 +238,11 @@ struct seq_file;
struct platform_device;
/* core */
-int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
-void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
-int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+static inline int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+	/* To be implemented when the OMAP platform provides this feature */
+ return 0;
+}
static inline bool dss_mgr_is_lcd(enum omap_channel id)
{
@@ -234,6 +254,16 @@ static inline bool dss_mgr_is_lcd(enum omap_channel id)
}
/* DSS */
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+#else
+static inline int dss_debugfs_create_file(const char *name,
+ void (*write)(struct seq_file *))
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
+
int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
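Because the header now provides a no-op stub when CONFIG_OMAP2_DSS_DEBUGFS is disabled, callers can register dump files unconditionally. A hypothetical caller (a sketch, not code from this patch; my_dump and my_init are made-up names) matching the declared callback type:

#include <linux/seq_file.h>

/* hypothetical dump callback with the void (*)(struct seq_file *) shape */
static void my_dump(struct seq_file *s)
{
	seq_printf(s, "example state dump\n");
}

static int my_init(void)
{
	/* creates <debugfs>/omapdss/mydump when debugfs support is built in */
	return dss_debugfs_create_file("mydump", my_dump);
}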
@@ -241,6 +271,8 @@ int dss_runtime_get(void);
void dss_runtime_put(void);
unsigned long dss_get_dispc_clk_rate(void);
+unsigned long dss_get_max_fck_rate(void);
+enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel);
int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
@@ -252,10 +284,6 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
struct regulator *regulator);
void dss_video_pll_uninit(struct dss_pll *pll);
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-void dss_debug_dump_clocks(struct seq_file *s);
-#endif
-
void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
void dss_sdi_init(int datapairs);
@@ -312,11 +340,12 @@ void dsi_irq_handler(void);
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init_port(struct platform_device *pdev, struct device_node *port);
+int dpi_init_port(struct platform_device *pdev, struct device_node *port,
+ enum dss_model dss_model);
void dpi_uninit_port(struct device_node *port);
#else
static inline int dpi_init_port(struct platform_device *pdev,
- struct device_node *port)
+ struct device_node *port, enum dss_model dss_model)
{
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
deleted file mode 100644
index 0e599710dd95..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ /dev/null
@@ -1,905 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/dss_features.c
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Archit Taneja <archit@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <drm/drm_fourcc.h>
-
-#include "omapdss.h"
-#include "dss.h"
-#include "dss_features.h"
-
-/* Defines a generic omap register field */
-struct dss_reg_field {
- u8 start, end;
-};
-
-struct dss_param_range {
- int min, max;
-};
-
-struct omap_dss_features {
- const struct dss_reg_field *reg_fields;
- const int num_reg_fields;
-
- const enum dss_feat_id *features;
- const int num_features;
-
- const int num_mgrs;
- const int num_ovls;
- const enum omap_display_type *supported_displays;
- const enum omap_dss_output_id *supported_outputs;
- const u32 **supported_color_modes;
- const enum omap_overlay_caps *overlay_caps;
- const struct dss_param_range *dss_params;
-
- const u32 buffer_size_unit;
- const u32 burst_size_unit;
-};
-
-/* This struct is assigned to one of the below during initialization */
-static const struct omap_dss_features *omap_current_dss_features;
-
-static const struct dss_reg_field omap2_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 11, 0 },
- [FEAT_REG_FIRVINC] = { 27, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
- [FEAT_REG_FIFOSIZE] = { 8, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
- [FEAT_REG_VERTICALACCU] = { 25, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
-};
-
-static const struct dss_reg_field omap3_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
- [FEAT_REG_FIFOSIZE] = { 10, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
- [FEAT_REG_VERTICALACCU] = { 25, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
-};
-
-static const struct dss_reg_field am43xx_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
- [FEAT_REG_FIFOSIZE] = { 10, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
- [FEAT_REG_VERTICALACCU] = { 25, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
-};
-
-static const struct dss_reg_field omap4_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
- [FEAT_REG_FIFOSIZE] = { 15, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
- [FEAT_REG_VERTICALACCU] = { 26, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
-};
-
-static const struct dss_reg_field omap5_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
- [FEAT_REG_FIFOSIZE] = { 15, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
- [FEAT_REG_VERTICALACCU] = { 26, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 },
-};
-
-static const enum omap_display_type omap2_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
-};
-
-static const enum omap_display_type omap3430_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
-};
-
-static const enum omap_display_type omap3630_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
-};
-
-static const enum omap_display_type am43xx_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
-};
-
-static const enum omap_display_type omap4_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-};
-
-static const enum omap_display_type omap5_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_HDMI | OMAP_DISPLAY_TYPE_DPI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-};
-
-static const enum omap_dss_output_id omap2_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC,
-};
-
-static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC,
-};
-
-static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC,
-};
-
-static const enum omap_dss_output_id am43xx_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
-};
-
-static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI2,
-};
-
-static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_HDMI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_LCD3 */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI2,
-};
-
-#define COLOR_ARRAY(arr...) (const u32[]) { arr, 0 }
-
-static const u32 *omap2_dss_supported_color_modes[] = {
-
- /* OMAP_DSS_GFX */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888),
-
- /* OMAP_DSS_VIDEO1 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY),
-
- /* OMAP_DSS_VIDEO2 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY),
-};
-
-static const u32 *omap3_dss_supported_color_modes[] = {
- /* OMAP_DSS_GFX */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_VIDEO1 */
- COLOR_ARRAY(
- DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888,
- DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
- DRM_FORMAT_YUYV, DRM_FORMAT_UYVY),
-
- /* OMAP_DSS_VIDEO2 */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
-};
-
-static const u32 *omap4_dss_supported_color_modes[] = {
- /* OMAP_DSS_GFX */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888,
- DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB1555),
-
- /* OMAP_DSS_VIDEO1 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_VIDEO2 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_VIDEO3 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_WB */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-};
-
-static const enum omap_overlay_caps omap2_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const enum omap_overlay_caps omap3430_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const enum omap_overlay_caps omap3630_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
- OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO3 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const struct dss_param_range omap2_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
- [FEAT_PARAM_DSS_PCD] = { 2, 255 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 2 },
- /*
- * Assuming the line width buffer to be 768 pixels as OMAP2 DISPC
- * scaler cannot scale an image with width more than 768.
- */
- [FEAT_PARAM_LINEWIDTH] = { 1, 768 },
-};
-
-static const struct dss_param_range omap3_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
- [FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
-};
-
-static const struct dss_param_range am43xx_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 200000000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
-};
-
-static const struct dss_param_range omap4_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
- [FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
-};
-
-static const struct dss_param_range omap5_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 209250000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
- [FEAT_PARAM_DSI_FCK] = { 0, 209250000 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
-};
-
-static const enum dss_feat_id omap2_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
-};
-
-static const enum dss_feat_id omap3430_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_DSI_REVERSE_TXCLKESC,
- FEAT_VENC_REQUIRES_TV_DAC_CLK,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_OMAP3_DSI_FIFO_BUG,
- FEAT_DPI_USES_VDDS_DSI,
-};
-
-static const enum dss_feat_id am35xx_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_DSI_REVERSE_TXCLKESC,
- FEAT_VENC_REQUIRES_TV_DAC_CLK,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_OMAP3_DSI_FIFO_BUG,
-};
-
-static const enum dss_feat_id am43xx_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
-};
-
-static const enum dss_feat_id omap3630_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_DSI_PLL_PWR_BUG,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_OMAP3_DSI_FIFO_BUG,
- FEAT_DPI_USES_VDDS_DSI,
-};
-
-static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
-};
-
-static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
-};
-
-static const enum dss_feat_id omap4_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HDMI_AUDIO_USE_MCLK,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
-};
-
-static const enum dss_feat_id omap5_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_MGR_LCD3,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HDMI_AUDIO_USE_MCLK,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
- FEAT_DSI_PHY_DCC,
- FEAT_MFLAG,
-};
-
-/* OMAP2 DSS Features */
-static const struct omap_dss_features omap2_dss_features = {
- .reg_fields = omap2_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
-
- .features = omap2_dss_feat_list,
- .num_features = ARRAY_SIZE(omap2_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap2_dss_supported_displays,
- .supported_outputs = omap2_dss_supported_outputs,
- .supported_color_modes = omap2_dss_supported_color_modes,
- .overlay_caps = omap2_dss_overlay_caps,
- .dss_params = omap2_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-/* OMAP3 DSS Features */
-static const struct omap_dss_features omap3430_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
-
- .features = omap3430_dss_feat_list,
- .num_features = ARRAY_SIZE(omap3430_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap3430_dss_supported_displays,
- .supported_outputs = omap3430_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3430_dss_overlay_caps,
- .dss_params = omap3_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-/*
- * AM35xx DSS Features. This is basically OMAP3 DSS Features without the
- * vdds_dsi regulator.
- */
-static const struct omap_dss_features am35xx_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
-
- .features = am35xx_dss_feat_list,
- .num_features = ARRAY_SIZE(am35xx_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap3430_dss_supported_displays,
- .supported_outputs = omap3430_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3430_dss_overlay_caps,
- .dss_params = omap3_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-static const struct omap_dss_features am43xx_dss_features = {
- .reg_fields = am43xx_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(am43xx_dss_reg_fields),
-
- .features = am43xx_dss_feat_list,
- .num_features = ARRAY_SIZE(am43xx_dss_feat_list),
-
- .num_mgrs = 1,
- .num_ovls = 3,
- .supported_displays = am43xx_dss_supported_displays,
- .supported_outputs = am43xx_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3430_dss_overlay_caps,
- .dss_params = am43xx_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-static const struct omap_dss_features omap3630_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
-
- .features = omap3630_dss_feat_list,
- .num_features = ARRAY_SIZE(omap3630_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap3630_dss_supported_displays,
- .supported_outputs = omap3630_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3630_dss_overlay_caps,
- .dss_params = omap3_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-/* OMAP4 DSS Features */
-/* For OMAP4430 ES 1.0 revision */
-static const struct omap_dss_features omap4430_es1_0_dss_features = {
- .reg_fields = omap4_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
-
- .features = omap4430_es1_0_dss_feat_list,
- .num_features = ARRAY_SIZE(omap4430_es1_0_dss_feat_list),
-
- .num_mgrs = 3,
- .num_ovls = 4,
- .supported_displays = omap4_dss_supported_displays,
- .supported_outputs = omap4_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap4_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* For OMAP4430 ES 2.0, 2.1 and 2.2 revisions */
-static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
- .reg_fields = omap4_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
-
- .features = omap4430_es2_0_1_2_dss_feat_list,
- .num_features = ARRAY_SIZE(omap4430_es2_0_1_2_dss_feat_list),
-
- .num_mgrs = 3,
- .num_ovls = 4,
- .supported_displays = omap4_dss_supported_displays,
- .supported_outputs = omap4_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap4_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* For all the other OMAP4 versions */
-static const struct omap_dss_features omap4_dss_features = {
- .reg_fields = omap4_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
-
- .features = omap4_dss_feat_list,
- .num_features = ARRAY_SIZE(omap4_dss_feat_list),
-
- .num_mgrs = 3,
- .num_ovls = 4,
- .supported_displays = omap4_dss_supported_displays,
- .supported_outputs = omap4_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap4_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* OMAP5 DSS Features */
-static const struct omap_dss_features omap5_dss_features = {
- .reg_fields = omap5_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap5_dss_reg_fields),
-
- .features = omap5_dss_feat_list,
- .num_features = ARRAY_SIZE(omap5_dss_feat_list),
-
- .num_mgrs = 4,
- .num_ovls = 4,
- .supported_displays = omap5_dss_supported_displays,
- .supported_outputs = omap5_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap5_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* Functions returning values related to a DSS feature */
-int dss_feat_get_num_mgrs(void)
-{
- return omap_current_dss_features->num_mgrs;
-}
-
-int dss_feat_get_num_ovls(void)
-{
- return omap_current_dss_features->num_ovls;
-}
-
-unsigned long dss_feat_get_param_min(enum dss_range_param param)
-{
- return omap_current_dss_features->dss_params[param].min;
-}
-
-unsigned long dss_feat_get_param_max(enum dss_range_param param)
-{
- return omap_current_dss_features->dss_params[param].max;
-}
-
-enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel)
-{
- return omap_current_dss_features->supported_displays[channel];
-}
-
-enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel)
-{
- return omap_current_dss_features->supported_outputs[channel];
-}
-
-const u32 *dss_feat_get_supported_color_modes(enum omap_plane_id plane)
-{
- return omap_current_dss_features->supported_color_modes[plane];
-}
-
-enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane_id plane)
-{
- return omap_current_dss_features->overlay_caps[plane];
-}
-
-bool dss_feat_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
-{
- const u32 *modes;
- unsigned int i;
-
- modes = omap_current_dss_features->supported_color_modes[plane];
-
- for (i = 0; modes[i]; ++i) {
- if (modes[i] == fourcc)
- return true;
- }
-
- return false;
-}
-
-u32 dss_feat_get_buffer_size_unit(void)
-{
- return omap_current_dss_features->buffer_size_unit;
-}
-
-u32 dss_feat_get_burst_size_unit(void)
-{
- return omap_current_dss_features->burst_size_unit;
-}
-
-/* DSS has_feature check */
-bool dss_has_feature(enum dss_feat_id id)
-{
- int i;
- const enum dss_feat_id *features = omap_current_dss_features->features;
- const int num_features = omap_current_dss_features->num_features;
-
- for (i = 0; i < num_features; i++) {
- if (features[i] == id)
- return true;
- }
-
- return false;
-}
-
-void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
-{
- if (id >= omap_current_dss_features->num_reg_fields)
- BUG();
-
- *start = omap_current_dss_features->reg_fields[id].start;
- *end = omap_current_dss_features->reg_fields[id].end;
-}
-
-void dss_features_init(enum omapdss_version version)
-{
- switch (version) {
- case OMAPDSS_VER_OMAP24xx:
- omap_current_dss_features = &omap2_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- omap_current_dss_features = &omap3430_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP3630:
- omap_current_dss_features = &omap3630_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- omap_current_dss_features = &omap4430_es1_0_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES2:
- omap_current_dss_features = &omap4430_es2_0_1_2_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP4:
- omap_current_dss_features = &omap4_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
- omap_current_dss_features = &omap5_dss_features;
- break;
-
- case OMAPDSS_VER_AM35xx:
- omap_current_dss_features = &am35xx_dss_features;
- break;
-
- case OMAPDSS_VER_AM43xx:
- omap_current_dss_features = &am43xx_dss_features;
- break;
-
- default:
- DSSWARN("Unsupported OMAP version");
- break;
- }
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
deleted file mode 100644
index c36436d27ff5..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/dss_features.h
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Archit Taneja <archit@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP2_DSS_FEATURES_H
-#define __OMAP2_DSS_FEATURES_H
-
-#define MAX_DSS_MANAGERS 4
-#define MAX_DSS_OVERLAYS 4
-#define MAX_DSS_LCD_MANAGERS 3
-#define MAX_NUM_DSI 2
-
-/* DSS has feature id */
-enum dss_feat_id {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_MGR_LCD2,
- FEAT_MGR_LCD3,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- /* Independent core clk divider */
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- /* DSI-PLL power command 0x3 is not working */
- FEAT_DSI_PLL_PWR_BUG,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_REVERSE_TXCLKESC,
- FEAT_DSI_GNQ,
- FEAT_DPI_USES_VDDS_DSI,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HDMI_AUDIO_USE_MCLK,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_VENC_REQUIRES_TV_DAC_CLK,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- /* An unknown HW bug causing the normal FIFO thresholds not to work */
- FEAT_OMAP3_DSI_FIFO_BUG,
- FEAT_BURST_2D,
- FEAT_DSI_PHY_DCC,
- FEAT_MFLAG,
-};
-
-/* DSS register field id */
-enum dss_feat_reg_field {
- FEAT_REG_FIRHINC,
- FEAT_REG_FIRVINC,
- FEAT_REG_FIFOHIGHTHRESHOLD,
- FEAT_REG_FIFOLOWTHRESHOLD,
- FEAT_REG_FIFOSIZE,
- FEAT_REG_HORIZONTALACCU,
- FEAT_REG_VERTICALACCU,
- FEAT_REG_DISPC_CLK_SWITCH,
-};
-
-enum dss_range_param {
- FEAT_PARAM_DSS_FCK,
- FEAT_PARAM_DSS_PCD,
- FEAT_PARAM_DSIPLL_LPDIV,
- FEAT_PARAM_DSI_FCK,
- FEAT_PARAM_DOWNSCALE,
- FEAT_PARAM_LINEWIDTH,
-};
-
-/* DSS Feature Functions */
-unsigned long dss_feat_get_param_min(enum dss_range_param param);
-unsigned long dss_feat_get_param_max(enum dss_range_param param);
-enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane_id plane);
-bool dss_feat_color_mode_supported(enum omap_plane_id plane,
- u32 fourcc);
-
-u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
-u32 dss_feat_get_burst_size_unit(void); /* in bytes */
-
-bool dss_has_feature(enum dss_feat_id id);
-void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
-void dss_features_init(enum omapdss_version version);
-
-enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
-enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel);
-
-int dss_feat_get_num_mgrs(void);
-int dss_feat_get_num_ovls(void);
-const u32 *dss_feat_get_supported_color_modes(enum omap_plane_id plane);
-
-#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index fb6cccd02374..a820b394af09 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -234,6 +234,7 @@ struct hdmi_core_audio_config {
struct hdmi_wp_data {
void __iomem *base;
phys_addr_t phys_base;
+ unsigned int version;
};
struct hdmi_pll_data {
@@ -245,15 +246,24 @@ struct hdmi_pll_data {
struct hdmi_wp_data *wp;
};
+struct hdmi_phy_features {
+ bool bist_ctrl;
+ bool ldo_voltage;
+ unsigned long max_phy;
+};
+
struct hdmi_phy_data {
void __iomem *base;
+ const struct hdmi_phy_features *features;
u8 lane_function[4];
u8 lane_polarity[4];
};
struct hdmi_core_data {
void __iomem *base;
+ bool cts_swmode;
+ bool audio_use_mclk;
};
static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx,
@@ -303,7 +313,8 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
struct videomode *vm, struct hdmi_config *param);
-int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
+ unsigned int version);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
@@ -316,7 +327,8 @@ void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
unsigned long lfbitclk);
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
-int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy,
+ unsigned int version);
int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
/* HDMI common funcs */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 284b4942b9ac..f169348da377 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -40,7 +40,6 @@
#include "omapdss.h"
#include "hdmi4_core.h"
#include "dss.h"
-#include "dss_features.h"
#include "hdmi.h"
static struct omap_hdmi hdmi;
@@ -668,7 +667,7 @@ static int hdmi_audio_register(struct device *dev)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = dev,
- .dss_version = omapdss_get_version(),
+ .version = 4,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
.ops = &hdmi_audio_ops,
};
@@ -700,7 +699,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_wp_init(pdev, &hdmi.wp);
+ r = hdmi_wp_init(pdev, &hdmi.wp, 4);
if (r)
return r;
@@ -708,7 +707,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_phy_init(pdev, &hdmi.phy);
+ r = hdmi_phy_init(pdev, &hdmi.phy, 4);
if (r)
goto err;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ed6001613405..365cf07daa01 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -31,11 +31,11 @@
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/seq_file.h>
+#include <linux/sys_soc.h>
#include <sound/asound.h>
#include <sound/asoundef.h>
#include "hdmi4_core.h"
-#include "dss_features.h"
#define HDMI_CORE_AV 0x500
@@ -757,10 +757,10 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
/* Audio clock regeneration settings */
acore.n = n;
acore.cts = cts;
- if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+ if (core->cts_swmode) {
acore.aud_par_busclk = 0;
acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- acore.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+ acore.use_mclk = core->audio_use_mclk;
} else {
acore.aud_par_busclk = (((128 * 31) - 1) << 8);
acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
@@ -884,10 +884,42 @@ void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
hdmi_wp_audio_core_req_enable(wp, false);
}
+struct hdmi4_features {
+ bool cts_swmode;
+ bool audio_use_mclk;
+};
+
+static const struct hdmi4_features hdmi4_es1_features = {
+ .cts_swmode = false,
+ .audio_use_mclk = false,
+};
+
+static const struct hdmi4_features hdmi4_es2_features = {
+ .cts_swmode = true,
+ .audio_use_mclk = false,
+};
+
+static const struct hdmi4_features hdmi4_es3_features = {
+ .cts_swmode = true,
+ .audio_use_mclk = true,
+};
+
+static const struct soc_device_attribute hdmi4_soc_devices[] = {
+ { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
+ { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
+ { .family = "OMAP4", .data = &hdmi4_es3_features },
+ { /* sentinel */ }
+};
+
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
+ const struct hdmi4_features *features;
struct resource *res;
+ features = soc_device_match(hdmi4_soc_devices)->data;
+ core->cts_swmode = features->cts_swmode;
+ core->audio_use_mclk = features->audio_use_mclk;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
core->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(core->base))
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 441e1999d86a..b3221ca5bcd8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -45,7 +45,6 @@
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
-#include "dss_features.h"
static struct omap_hdmi hdmi;
@@ -695,7 +694,7 @@ static int hdmi_audio_register(struct device *dev)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = dev,
- .dss_version = omapdss_get_version(),
+ .version = 5,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
.ops = &hdmi_audio_ops,
};
@@ -732,7 +731,7 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_wp_init(pdev, &hdmi.wp);
+ r = hdmi_wp_init(pdev, &hdmi.wp, 5);
if (r)
return r;
@@ -740,7 +739,7 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_phy_init(pdev, &hdmi.phy);
+ r = hdmi_phy_init(pdev, &hdmi.phy, 5);
if (r)
goto err;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index fb5e4c724b4b..a156292b1820 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -19,14 +19,6 @@
#include "dss.h"
#include "hdmi.h"
-struct hdmi_phy_features {
- bool bist_ctrl;
- bool ldo_voltage;
- unsigned long max_phy;
-};
-
-static const struct hdmi_phy_features *phy_feat;
-
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
{
#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
@@ -36,7 +28,7 @@ void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
DUMPPHY(HDMI_TXPHY_POWER_CTRL);
DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
- if (phy_feat->bist_ctrl)
+ if (phy->features->bist_ctrl)
DUMPPHY(HDMI_TXPHY_BIST_CONTROL);
}
@@ -146,7 +138,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
* In OMAP5+, the HFBITCLK must be divided by 2 before issuing the
* HDMI_PHYPWRCMD_LDOON command.
*/
- if (phy_feat->bist_ctrl)
+ if (phy->features->bist_ctrl)
REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
/*
@@ -155,7 +147,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
*/
if (hfbitclk != lfbitclk)
freqout = 0;
- else if (hfbitclk / 10 < phy_feat->max_phy)
+ else if (hfbitclk / 10 < phy->features->max_phy)
freqout = 1;
else
freqout = 2;
@@ -170,7 +162,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
/* Setup max LDO voltage */
- if (phy_feat->ldo_voltage)
+ if (phy->features->ldo_voltage)
REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
hdmi_phy_configure_lanes(phy);
@@ -190,47 +182,15 @@ static const struct hdmi_phy_features omap54xx_phy_feats = {
.max_phy = 186000000,
};
-static int hdmi_phy_init_features(struct platform_device *pdev)
-{
- struct hdmi_phy_features *dst;
- const struct hdmi_phy_features *src;
-
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate HDMI PHY Features\n");
- return -ENOMEM;
- }
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- src = &omap44xx_phy_feats;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
- src = &omap54xx_phy_feats;
- break;
-
- default:
- return -ENODEV;
- }
-
- memcpy(dst, src, sizeof(*dst));
- phy_feat = dst;
-
- return 0;
-}
-
-int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy,
+ unsigned int version)
{
- int r;
struct resource *res;
- r = hdmi_phy_init_features(pdev);
- if (r)
- return r;
+ if (version == 4)
+ phy->features = &omap44xx_phy_feats;
+ else
+ phy->features = &omap54xx_phy_feats;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
phy->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 46239358655a..55bee81f4dd5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -71,7 +71,7 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct dss_pll_ops dsi_pll_ops = {
+static const struct dss_pll_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.set_config = dss_pll_write_config_type_b,
@@ -128,7 +128,8 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.has_refsel = true,
};
-static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data *hpll)
+static int hdmi_init_pll_data(struct platform_device *pdev,
+ struct hdmi_pll_data *hpll)
{
struct dss_pll *pll = &hpll->pll;
struct clk *clk;
@@ -145,23 +146,12 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
pll->base = hpll->base;
pll->clkin = clk;
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
+ if (hpll->wp->version == 4)
pll->hw = &dss_omap4_hdmi_pll_hw;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
+ else
pll->hw = &dss_omap5_hdmi_pll_hw;
- break;
-
- default:
- return -ENODEV;
- }
- pll->ops = &dsi_pll_ops;
+ pll->ops = &hdmi_pll_ops;
r = dss_pll_register(pll);
if (r)
@@ -184,7 +174,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
- r = dsi_init_pll_data(pdev, pll);
+ r = hdmi_init_pll_data(pdev, pll);
if (r) {
DSSERR("failed to init HDMI PLL\n");
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index ab129df2e310..88034fbe0e9f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -178,9 +178,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
- if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4)
+ if (wp->version == 4)
hsync_len_offset = 0;
timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
@@ -235,9 +233,7 @@ void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
DSSDBG("Enter hdmi_wp_audio_config_format\n");
r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
- if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4) {
+ if (wp->version == 4) {
r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
}
@@ -282,7 +278,8 @@ int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
return 0;
}
-int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
+ unsigned int version)
{
struct resource *res;
@@ -292,6 +289,7 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
return PTR_ERR(wp->base);
wp->phys_base = res->start;
+ wp->version = version;
return 0;
}
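Taken together with the hdmi4/hdmi5 bind hunks earlier, the version now flows one way: the binder passes a literal 4 or 5, hdmi_wp_init() caches it in struct hdmi_wp_data, and consumers branch on the cached value instead of re-querying omapdss_get_version(). A sketch of a consumer under those assumptions (hdmi_pick_pll_hw() is hypothetical; the descriptor names come from the hdmi_pll.c hunk above):

	static const struct dss_pll_hw *hdmi_pick_pll_hw(const struct hdmi_wp_data *wp)
	{
		/* only versions 4 and 5 exist in this scheme */
		return wp->version == 4 ? &dss_omap4_hdmi_pll_hw
					: &dss_omap5_hdmi_pll_hw;
	}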
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 85953a0bc7c2..47a331670963 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -25,6 +25,7 @@
#include <video/videomode.h>
#include <linux/platform_data/omapdss.h>
#include <uapi/drm/drm_mode.h>
+#include <drm/drm_crtc.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -241,13 +242,6 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-/* Hardcoded videomodes for tv. Venc only uses these to
- * identify the mode, and does not actually use the configs
- * itself. However, the configs should be something that
- * a normal monitor can also show */
-extern const struct videomode omap_dss_pal_vm;
-extern const struct videomode omap_dss_ntsc_vm;
-
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
s16 gr, gg, gb;
@@ -403,6 +397,14 @@ struct omapdss_hdmi_ops {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
+ int (*register_hpd_cb)(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data);
+ void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
+ void (*enable_hpd)(struct omap_dss_device *dssdev);
+ void (*disable_hpd)(struct omap_dss_device *dssdev);
+
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
@@ -567,12 +569,19 @@ struct omap_dss_driver {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
+ int (*register_hpd_cb)(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data);
+ void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
+ void (*enable_hpd)(struct omap_dss_device *dssdev);
+ void (*disable_hpd)(struct omap_dss_device *dssdev);
+
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
};
-enum omapdss_version omapdss_get_version(void);
bool omapdss_is_initialized(void);
int omap_dss_register_driver(struct omap_dss_driver *);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 5e221302768b..9d9d9d42009b 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -215,8 +215,8 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
dss_pll_calc_func func, void *data)
{
const struct dss_pll_hw *hw = pll->hw;
- int n, n_min, n_max;
- int m, m_min, m_max;
+ int n, n_start, n_stop, n_inc;
+ int m, m_start, m_stop, m_inc;
unsigned long fint, clkdco;
unsigned long pll_hw_max;
unsigned long fint_hw_min, fint_hw_max;
@@ -226,22 +226,33 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
fint_hw_min = hw->fint_min;
fint_hw_max = hw->fint_max;
- n_min = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
- n_max = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+ n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+ n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+ n_inc = 1;
+
+ if (hw->errata_i886) {
+ swap(n_start, n_stop);
+ n_inc = -1;
+ }
pll_max = pll_max ? pll_max : ULONG_MAX;
- /* Try to find high N & M to avoid jitter (DRA7 errata i886) */
- for (n = n_max; n >= n_min; --n) {
+ for (n = n_start; n != n_stop; n += n_inc) {
fint = clkin / n;
- m_min = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+ m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
1ul);
- m_max = min3((unsigned)(pll_max / fint / 2),
+ m_stop = min3((unsigned)(pll_max / fint / 2),
(unsigned)(pll_hw_max / fint / 2),
hw->m_max);
+ m_inc = 1;
+
+ if (hw->errata_i886) {
+ swap(m_start, m_stop);
+ m_inc = -1;
+ }
- for (m = m_max; m >= m_min; --m) {
+ for (m = m_start; m != m_stop; m += m_inc) {
clkdco = 2 * m * fint;
if (func(n, m, fint, clkdco, data))
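The rewritten loops above fold the DRA7 i886 work-around into data: with hw->errata_i886 set, the bounds are swapped and the increment negated, so N and M are scanned from high to low (high divider values reduce jitter, per the comment the patch removes), while other SoCs keep the ascending order. One detail worth noting is that the "n != n_stop" test excludes the stop bound itself in either direction. A minimal sketch of the pattern, assuming a hypothetical try_divider() and the kernel's swap() macro:

	static void scan_dividers(int n_min, int n_max, bool errata_i886)
	{
		int n, n_start = n_min, n_stop = n_max, n_inc = 1;

		if (errata_i886) {
			swap(n_start, n_stop);	/* walk from high N down to low N */
			n_inc = -1;
		}

		/* note: n_stop itself is never tried, mirroring the patch */
		for (n = n_start; n != n_stop; n += n_inc)
			try_divider(n);
	}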
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index a6bfb3918b8d..d58da6f32693 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,10 +37,10 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
/* Venc registers */
#define VENC_REV_ID 0x00
@@ -263,7 +263,13 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct videomode omap_dss_pal_vm = {
+enum venc_videomode {
+ VENC_MODE_UNKNOWN,
+ VENC_MODE_PAL,
+ VENC_MODE_NTSC,
+};
+
+static const struct videomode omap_dss_pal_vm = {
.hactive = 720,
.vactive = 574,
.pixelclock = 13500000,
@@ -279,9 +285,8 @@ const struct videomode omap_dss_pal_vm = {
DISPLAY_FLAGS_PIXDATA_POSEDGE |
DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct videomode omap_dss_ntsc_vm = {
+static const struct videomode omap_dss_ntsc_vm = {
.hactive = 720,
.vactive = 482,
.pixelclock = 13500000,
@@ -297,7 +302,24 @@ const struct videomode omap_dss_ntsc_vm = {
DISPLAY_FLAGS_PIXDATA_POSEDGE |
DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_vm);
+
+static enum venc_videomode venc_get_videomode(const struct videomode *vm)
+{
+ if (!(vm->flags & DISPLAY_FLAGS_INTERLACED))
+ return VENC_MODE_UNKNOWN;
+
+ if (vm->pixelclock == omap_dss_pal_vm.pixelclock &&
+ vm->hactive == omap_dss_pal_vm.hactive &&
+ vm->vactive == omap_dss_pal_vm.vactive)
+ return VENC_MODE_PAL;
+
+ if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock &&
+ vm->hactive == omap_dss_ntsc_vm.hactive &&
+ vm->vactive == omap_dss_ntsc_vm.vactive)
+ return VENC_MODE_NTSC;
+
+ return VENC_MODE_UNKNOWN;
+}
static struct {
struct platform_device *pdev;
@@ -311,6 +333,7 @@ static struct {
struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
+ bool requires_tv_dac_clk;
struct omap_dss_device output;
} venc;
@@ -424,14 +447,14 @@ static void venc_runtime_put(void)
static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
+ switch (venc_get_videomode(vm)) {
+ default:
+ WARN_ON_ONCE(1);
+ case VENC_MODE_PAL:
return &venc_config_pal_trm;
-
- if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
+ case VENC_MODE_NTSC:
return &venc_config_ntsc_trm;
-
- BUG();
- return NULL;
+ }
}
static int venc_power_on(struct omap_dss_device *dssdev)
@@ -542,15 +565,28 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
static void venc_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct videomode actual_vm;
+
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
+ switch (venc_get_videomode(vm)) {
+ default:
+ WARN_ON_ONCE(1);
+ case VENC_MODE_PAL:
+ actual_vm = omap_dss_pal_vm;
+ break;
+ case VENC_MODE_NTSC:
+ actual_vm = omap_dss_ntsc_vm;
+ break;
+ }
+
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.vm, vm, sizeof(*vm)))
+ if (memcmp(&venc.vm, &actual_vm, sizeof(actual_vm)))
venc.wss_data = 0;
- venc.vm = *vm;
+ venc.vm = actual_vm;
dispc_set_tv_pclk(13500000);
@@ -562,13 +598,13 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
- return 0;
-
- if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
+ switch (venc_get_videomode(vm)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
return 0;
-
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
}
static void venc_get_timings(struct omap_dss_device *dssdev,
@@ -693,7 +729,7 @@ static int venc_get_clocks(struct platform_device *pdev)
{
struct clk *clk;
- if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
+ if (venc.requires_tv_dac_clk) {
clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
@@ -828,6 +864,12 @@ err:
}
/* VENC HW IP initialisation */
+static const struct soc_device_attribute venc_soc_devices[] = {
+ { .machine = "OMAP3[45]*" },
+ { .machine = "AM35*" },
+ { /* sentinel */ }
+};
+
static int venc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -837,6 +879,10 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
venc.pdev = pdev;
+ /* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
+ if (soc_device_match(venc_soc_devices))
+ venc.requires_tv_dac_clk = true;
+
mutex_init(&venc.venc_lock);
venc.wss_data = 0;
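The venc_get_videomode() classifier above replaces exact memcmp() matching of caller-supplied timings: a mode counts as PAL or NTSC when it is interlaced and its pixel clock and active area match, and venc_set_timings() then snaps to the full canonical table entry instead of trusting every field from the caller. A usage sketch under those assumptions (values taken from omap_dss_pal_vm above):

	struct videomode vm = {
		.pixelclock = 13500000,
		.hactive = 720,
		.vactive = 574,
		.flags = DISPLAY_FLAGS_INTERLACED,
	};

	switch (venc_get_videomode(&vm)) {
	case VENC_MODE_PAL:
		vm = omap_dss_pal_vm;	/* adopt the full canonical timings */
		break;
	case VENC_MODE_NTSC:
		vm = omap_dss_ntsc_vm;
		break;
	default:
		/* VENC can only output PAL or NTSC */
		break;
	}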
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index fbd1263a29a4..38a239cc5e04 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -19,7 +19,6 @@
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
struct dss_video_pll {
struct dss_pll pll;
@@ -131,6 +130,8 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.mX_lsb[3] = 5,
.has_refsel = true,
+
+ .errata_i886 = true,
};
struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index d1ec76ef5cc6..aa5ba9ae2191 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -35,6 +35,23 @@ struct omap_connector {
bool hdmi_mode;
};
+static void omap_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ struct omap_connector *omap_connector = cb_data;
+ struct drm_connector *connector = &omap_connector->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+
+ mutex_lock(&dev->mode_config.mutex);
+ old_status = connector->status;
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (old_status != status)
+ drm_kms_helper_hotplug_event(dev);
+}
+
bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
@@ -75,6 +92,10 @@ static void omap_connector_destroy(struct drm_connector *connector)
struct omap_dss_device *dssdev = omap_connector->dssdev;
DBG("%s", omap_connector->dssdev->name);
+ if (connector->polled == DRM_CONNECTOR_POLL_HPD &&
+ dssdev->driver->unregister_hpd_cb) {
+ dssdev->driver->unregister_hpd_cb(dssdev);
+ }
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
@@ -215,6 +236,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
+ bool hpd_supported = false;
DBG("%s", dssdev->name);
@@ -232,7 +254,20 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector_type);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- if (dssdev->driver->detect)
+ if (dssdev->driver->register_hpd_cb) {
+ int ret = dssdev->driver->register_hpd_cb(dssdev,
+ omap_connector_hpd_cb,
+ omap_connector);
+ if (!ret)
+ hpd_supported = true;
+ else if (ret != -ENOTSUPP)
+ DBG("%s: Failed to register HPD callback (%d).",
+ dssdev->name, ret);
+ }
+
+ if (hpd_supported)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (dssdev->driver->detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
else
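The new omapdss HPD ops are consumed above: when the display driver can register a hot-plug callback, the connector advertises DRM_CONNECTOR_POLL_HPD and skips periodic detect polling. A sketch of what the producing side might look like (struct panel_drv_data, to_panel_data() and the hpd_cb fields are hypothetical; only the op signature comes from the omapdss.h hunk):

	static int panel_register_hpd_cb(struct omap_dss_device *dssdev,
					 void (*cb)(void *cb_data,
						    enum drm_connector_status status),
					 void *cb_data)
	{
		struct panel_drv_data *ddata = to_panel_data(dssdev);

		/* stash the callback; invoke it from the HPD GPIO interrupt */
		ddata->hpd_cb = cb;
		ddata->hpd_cb_data = cb_data;
		return 0;
	}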
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 400d0d2f6790..cc85c16cbc2a 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -589,8 +589,10 @@ omap_crtc_duplicate_state(struct drm_crtc *crtc)
current_state = to_omap_crtc_state(crtc->state);
state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
state->zpos = current_state->zpos;
state->rotation = current_state->rotation;
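The hunk above turns a subtle NULL dereference into an early return: the old code guarded only the helper call, then assigned state->zpos and state->rotation unconditionally, so a failed kmalloc() would have crashed. The resulting shape is the usual guard-clause pattern:

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;		/* allocation failed, bail out early */

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	/* only now is it safe to fill in the driver-private fields */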
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 721a358531b0..cdf5b0601eba 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -84,23 +84,36 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- /* With the current dss dispc implementation we have to enable
- * the new modeset before we can commit planes. The dispc ovl
- * configuration relies on the video mode configuration been
- * written into the HW when the ovl configuration is
- * calculated.
- *
- * This approach is not ideal because after a mode change the
- * plane update is executed only after the first vblank
- * interrupt. The dispc implementation should be fixed so that
- * it is able use uncommitted drm state information.
- */
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
- omap_atomic_wait_for_completion(dev, old_state);
-
- drm_atomic_helper_commit_planes(dev, old_state, 0);
-
- drm_atomic_helper_commit_hw_done(old_state);
+ if (priv->omaprev != 0x3430) {
+ /* With the current dss dispc implementation we have to enable
+ * the new modeset before we can commit planes. The dispc ovl
+ * configuration relies on the video mode configuration been
+ * written into the HW when the ovl configuration is
+ * calculated.
+ *
+ * This approach is not ideal because after a mode change the
+ * plane update is executed only after the first vblank
+ * interrupt. The dispc implementation should be fixed so that
+ * it is able to use uncommitted drm state information.
+ */
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ omap_atomic_wait_for_completion(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ } else {
+ /*
+ * OMAP3 DSS seems to have issues with the work-around above,
+ * resulting in endless sync losses if a crtc is enabled without
+ * a plane. For now, skip the WA for OMAP3.
+ */
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ }
/*
* Wait for completion of the page flips to ensure that old buffers
@@ -324,6 +337,32 @@ static int omap_modeset_init(struct drm_device *dev)
}
/*
+ * Enable the HPD in external components if supported
+ */
+static void omap_modeset_enable_external_hpd(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (dssdev->driver->enable_hpd)
+ dssdev->driver->enable_hpd(dssdev);
+ }
+}
+
+/*
+ * Disable the HPD in external components if supported
+ */
+static void omap_modeset_disable_external_hpd(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (dssdev->driver->disable_hpd)
+ dssdev->driver->disable_hpd(dssdev);
+ }
+}
+
+/*
* drm ioctl funcs
*/
@@ -438,44 +477,11 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
*/
static void dev_lastclose(struct drm_device *dev)
{
- int i;
-
- /* we don't support vga_switcheroo.. so just make sure the fbdev
- * mode is active
- */
struct omap_drm_private *priv = dev->dev_private;
int ret;
DBG("lastclose: dev=%p", dev);
- /* need to restore default rotation state.. not sure
- * if there is a cleaner way to restore properties to
- * default state? Maybe a flag that properties should
- * automatically be restored to default state on
- * lastclose?
- */
- for (i = 0; i < priv->num_crtcs; i++) {
- struct drm_crtc *crtc = priv->crtcs[i];
-
- if (!crtc->primary->rotation_property)
- continue;
-
- drm_object_property_set_value(&crtc->base,
- crtc->primary->rotation_property,
- DRM_MODE_ROTATE_0);
- }
-
- for (i = 0; i < priv->num_planes; i++) {
- struct drm_plane *plane = priv->planes[i];
-
- if (!plane->rotation_property)
- continue;
-
- drm_object_property_set_value(&plane->base,
- plane->rotation_property,
- DRM_MODE_ROTATE_0);
- }
-
if (priv->fbdev) {
ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
if (ret)
@@ -549,6 +555,12 @@ static int pdev_probe(struct platform_device *pdev)
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the DMA mask\n");
+ return ret;
+ }
+
omap_crtc_pre_init();
ret = omap_connect_dssdevs();
@@ -602,6 +614,7 @@ static int pdev_probe(struct platform_device *pdev)
priv->fbdev = omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
+ omap_modeset_enable_external_hpd();
/*
* Register the DRM device with the core and the connectors with
@@ -614,6 +627,7 @@ static int pdev_probe(struct platform_device *pdev)
return 0;
err_cleanup_helpers:
+ omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
if (priv->fbdev)
omap_fbdev_free(ddev);
@@ -642,6 +656,7 @@ static int pdev_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
+ omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
if (priv->fbdev)
@@ -733,7 +748,7 @@ static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
static struct platform_driver pdev = {
.driver = {
- .name = DRIVER_NAME,
+ .name = "omapdrm",
.pm = &omapdrm_pm_ops,
},
.probe = pdev_probe,
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index ddf7a457951b..b1a762b70cbf 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -379,7 +379,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
return fb;
error:
- while (--i > 0)
+ while (--i >= 0)
drm_gem_object_unreference_unlocked(bos[i]);
return fb;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 863a881dd7cd..afdbad5c866a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -144,7 +144,7 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
return omap_gem_mmap_obj(obj, vma);
}
-static struct dma_buf_ops omap_dmabuf_ops = {
+static const struct dma_buf_ops omap_dmabuf_ops = {
.map_dma_buf = omap_gem_map_dma_buf,
.unmap_dma_buf = omap_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index f0139fa58d55..b58c988d9da0 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -23,6 +23,7 @@
#include <drm/drmP.h>
#include <drm/drm_panel.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "pl111_drm.h"
@@ -274,7 +275,7 @@ void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc)
static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
- return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
static const struct drm_simple_display_pipe_funcs pl111_display_funcs = {
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 29653fe5285c..581c452cede1 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -66,14 +66,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "pl111_drm.h"
#define DRIVER_DESC "DRM module for PL111"
-static struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 403e135895bf..2445e75cf7ea 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -263,7 +263,6 @@ static struct drm_driver qxl_driver = {
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5008f3d4cccc..ec63bc5e9de7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -464,7 +464,7 @@ struct radeon_bo_list {
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
- unsigned prefered_domains;
+ unsigned preferred_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
};
@@ -2327,7 +2327,7 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
- struct radeon_bo *stollen_vga_memory;
+ struct radeon_bo *stolen_vga_memory;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 6efbd65c929e..8d3251a10cd4 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -351,7 +351,7 @@ out:
* handles it.
* Returns NOTIFY code
*/
-int radeon_atif_handler(struct radeon_device *rdev,
+static int radeon_atif_handler(struct radeon_device *rdev,
struct acpi_bus_event *event)
{
struct radeon_atif *atif = &rdev->atif;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
index 7af1977c2c68..35202a453e66 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.h
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -27,9 +27,6 @@
struct radeon_device;
struct acpi_bus_event;
-int radeon_atif_handler(struct radeon_device *rdev,
- struct acpi_bus_event *event);
-
/* AMD hw uses four ACPI control methods:
* 1. ATIF
* ARG0: (ACPI_INTEGER) function code
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 00b22af70f5c..1ae31dbc61c6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -130,7 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->rdev->family == CHIP_RS880)) {
/* TODO: is this still needed for NI+ ? */
- p->relocs[i].prefered_domains =
+ p->relocs[i].preferred_domains =
RADEON_GEM_DOMAIN_VRAM;
p->relocs[i].allowed_domains =
@@ -148,14 +148,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -EINVAL;
}
- p->relocs[i].prefered_domains = domain;
+ p->relocs[i].preferred_domains = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
p->relocs[i].allowed_domains = domain;
}
if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
- uint32_t domain = p->relocs[i].prefered_domains;
+ uint32_t domain = p->relocs[i].preferred_domains;
if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
"allowed for userptr BOs\n");
@@ -163,7 +163,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
need_mmap_lock = true;
domain = RADEON_GEM_DOMAIN_GTT;
- p->relocs[i].prefered_domains = domain;
+ p->relocs[i].preferred_domains = domain;
p->relocs[i].allowed_domains = domain;
}
@@ -437,7 +437,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (bo == NULL)
continue;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->gem_base);
}
}
kfree(parser->track);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 4a4f9533c53b..91952277557e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -307,7 +307,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
robj = gem_to_radeon_bo(obj);
ret = radeon_bo_reserve(robj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
/* Only 27 bit offset for legacy cursor */
@@ -317,7 +317,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_bo_unreserve(robj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -352,7 +352,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ee274c6e374d..ddfe91efa61e 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -267,7 +267,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
kfree(work);
}
@@ -504,7 +504,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
obj = old_radeon_fb->obj;
/* take a reference to the old object */
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
work->old_rbo = gem_to_radeon_bo(obj);
new_radeon_fb = to_radeon_framebuffer(fb);
@@ -603,7 +603,7 @@ pflip_cleanup:
radeon_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
dma_fence_put(work->fence);
kfree(work);
return r;
@@ -1288,7 +1288,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- drm_gem_object_unreference_unlocked(radeon_fb->obj);
+ drm_gem_object_put_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -1348,14 +1348,14 @@ radeon_user_framebuffer_create(struct drm_device *dev,
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index af6ee7d9b465..fd25361ac681 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -118,7 +118,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
@@ -299,7 +299,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 574bf7e6b118..3386452bd2f0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -271,7 +271,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -352,7 +352,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
goto handle_lockup;
@@ -361,7 +361,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
release_object:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
handle_lockup:
up_read(&rdev->exclusive_lock);
@@ -395,7 +395,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
@@ -414,11 +414,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
}
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = radeon_bo_mmap_offset(robj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -453,7 +453,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -485,7 +485,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -504,7 +504,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -527,7 +527,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -661,14 +661,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
r = radeon_bo_reserve(rbo, false);
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
args->operation = RADEON_VA_RESULT_ERROR;
radeon_bo_unreserve(rbo);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -ENOENT;
}
@@ -695,7 +695,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -736,7 +736,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
radeon_bo_unreserve(robj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -762,7 +762,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index a2ab6dcdf4a2..f6578c96925c 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -75,12 +75,14 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -482,7 +484,9 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m;
@@ -636,7 +640,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
@@ -785,7 +789,8 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset)
{
- return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+ return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]
+ / 4;
}
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid)
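The added division by four reads as a unit conversion: if watchRegs[] stores byte offsets, dividing by sizeof(uint32_t) yields a dword register index. The table itself sits outside this hunk, so that interpretation is hedged; the arithmetic being applied is simply:

	/* illustrative only; watchRegs[] layout is assumed, not shown here */
	uint32_t byte_offset = watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX
					 + reg_offset];
	uint32_t dword_index = byte_offset / sizeof(uint32_t);	/* == / 4 */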
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b722297a05c..093594976126 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -445,7 +445,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->gem_base);
}
}
@@ -546,7 +546,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
list_for_each_entry(lobj, head, tv.head) {
struct radeon_bo *bo = lobj->robj;
if (!bo->pin_count) {
- u32 domain = lobj->prefered_domains;
+ u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 2804b4a15896..bf69bf9086bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -907,17 +907,17 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- NULL, &rdev->stollen_vga_memory);
+ NULL, &rdev->stolen_vga_memory);
if (r) {
return r;
}
- r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
if (r)
return r;
- r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
- radeon_bo_unreserve(rdev->stollen_vga_memory);
+ r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stolen_vga_memory);
if (r) {
- radeon_bo_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stolen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
@@ -946,13 +946,13 @@ void radeon_ttm_fini(struct radeon_device *rdev)
if (!rdev->mman.initialized)
return;
radeon_ttm_debugfs_fini(rdev);
- if (rdev->stollen_vga_memory) {
- r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (rdev->stolen_vga_memory) {
+ r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
if (r == 0) {
- radeon_bo_unpin(rdev->stollen_vga_memory);
- radeon_bo_unreserve(rdev->stollen_vga_memory);
+ radeon_bo_unpin(rdev->stolen_vga_memory);
+ radeon_bo_unreserve(rdev->stolen_vga_memory);
}
- radeon_bo_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stolen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
@@ -1030,19 +1030,17 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = *(int *)node->info_ent->data;
+ unsigned ttm_pl = *(int*)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+ struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ man->func->debug(man, &p);
return 0;
}
+
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
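The debugfs dump above now goes through the memory-type manager's own debug callback instead of reaching into the drm_mm behind it. As a rough sketch, a drm_mm-backed manager of this era would implement that callback along these lines (example_priv and its lock are hypothetical; the callback signature taking a drm_printer matches how &p is passed above):

	struct example_priv {
		spinlock_t lock;	/* hypothetical lock protecting mm */
		struct drm_mm mm;
	};

	static void example_mm_debug(struct ttm_mem_type_manager *man,
				     struct drm_printer *printer)
	{
		struct example_priv *priv = man->priv;

		spin_lock(&priv->lock);
		drm_mm_print(&priv->mm, printer);
		spin_unlock(&priv->lock);
	}

This keeps locking inside the manager that owns the allocator, which is why the caller no longer needs the global LRU lock.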
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 5f68245579a3..5e82b408d522 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -139,7 +139,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
- list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
@@ -151,7 +151,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
continue;
list[idx].robj = vm->page_tables[i].bo;
- list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 50c41c0a50ef..dcc539ba85d6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -5,6 +5,10 @@ config DRM_ROCKCHIP
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
+ select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+ select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip SoC chipset.
This driver provides kernel mode setting and buffer
@@ -12,10 +16,10 @@ config DRM_ROCKCHIP
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+if DRM_ROCKCHIP
+
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
- depends on DRM_ROCKCHIP
- select DRM_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
@@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
- depends on DRM_ROCKCHIP
- depends on EXTCON
- select SND_SOC_HDMI_CODEC if SND_SOC
+ depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable DP on
@@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
- depends on DRM_ROCKCHIP
- select DRM_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
@@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_ROCKCHIP
- select DRM_MIPI_DSI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare MIPI DSI driver. If you want to
@@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
- depends on DRM_ROCKCHIP
help
This selects support for Rockchip SoC specific extensions
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+
+endif
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index bd87768dd549..7a251a54e792 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -592,7 +592,7 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs inno_hdmi_connector_funcs = {
+static const struct drm_connector_funcs inno_hdmi_connector_funcs = {
.fill_modes = inno_hdmi_probe_single_connector_modes,
.detect = inno_hdmi_connector_detect,
.destroy = inno_hdmi_connector_destroy,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 8a0f75612d4b..70773041785b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -48,7 +48,7 @@ static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
int i;
for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
- drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
+ drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(rockchip_fb);
@@ -144,7 +144,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
if (obj->size < min_size) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
ret = -EINVAL;
goto err_gem_object_unreference;
}
@@ -161,7 +161,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err_gem_object_unreference:
for (i--; i >= 0; i--)
- drm_gem_object_unreference_unlocked(objs[i]);
+ drm_gem_object_put_unlocked(objs[i]);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index ce946b9c57a9..724579ebf947 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -173,7 +173,7 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
drm_fb_helper_unregister_fbi(helper);
if (helper->fb)
- drm_framebuffer_unreference(helper->fb);
+ drm_framebuffer_put(helper->fb);
drm_fb_helper_fini(helper);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index f74333efe4bb..1869c8bb76c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -383,7 +383,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv,
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 948719dddc36..bf9ed0e63973 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1026,7 +1026,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
if (old_plane_state->fb == new_plane_state->fb)
continue;
- drm_framebuffer_reference(old_plane_state->fb);
+ drm_framebuffer_get(old_plane_state->fb);
drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
@@ -1150,7 +1150,7 @@ static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
struct drm_framebuffer *fb = val;
drm_crtc_vblank_put(&vop->crtc);
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
}
static void vop_handle_vblank(struct vop *vop)
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 5bcad8f5fb4f..06f05302ee75 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -13,17 +13,26 @@ config DRM_SUN4I
Display Engine. If M is selected the module will be called
sun4i-drm.
+if DRM_SUN4I
+
config DRM_SUN4I_HDMI
tristate "Allwinner A10 HDMI Controller Support"
- depends on DRM_SUN4I
default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with an HDMI
controller.
+config DRM_SUN4I_HDMI_CEC
+ bool "Allwinner A10 HDMI CEC Support"
+ depends on DRM_SUN4I_HDMI
+ select CEC_CORE
+ depends on CEC_PIN
+ help
+ Choose this option if you have an Allwinner SoC with an HDMI
+ controller and want to use CEC.
+
config DRM_SUN4I_BACKEND
tristate "Support for Allwinner A10 Display Engine Backend"
- depends on DRM_SUN4I
default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with the
@@ -33,10 +42,11 @@ config DRM_SUN4I_BACKEND
config DRM_SUN8I_MIXER
tristate "Support for Allwinner Display Engine 2.0 Mixer"
- depends on DRM_SUN4I
default MACH_SUN8I
help
Choose this option if you have an Allwinner SoC with the
Allwinner Display Engine 2.0, which has a mixer to do some
graphics mixing and feed graphics to the TCON. If M is
selected the module will be called sun8i-mixer.
+
+endif
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index e29fd3a2ba9c..43c753cafc88 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -2,6 +2,7 @@ sun4i-drm-y += sun4i_drv.o
sun4i-drm-y += sun4i_framebuffer.o
sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
+sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index cf480218daa5..ec5943627aa5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -312,7 +312,7 @@ static int sun4i_backend_of_get_id(struct device_node *node)
struct device_node *remote;
u32 reg;
- remote = of_parse_phandle(ep, "remote-endpoint", 0);
+ remote = of_graph_get_remote_endpoint(ep);
if (!remote)
continue;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 2f2f2ff1ea63..1457750988da 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -15,6 +15,8 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <media/cec.h>
+
#define SUN4I_HDMI_CTRL_REG 0x004
#define SUN4I_HDMI_CTRL_ENABLE BIT(31)
@@ -86,6 +88,11 @@
#define SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_MASK BIT(21)
#define SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_SHIFT 21
+#define SUN4I_HDMI_CEC 0x214
+#define SUN4I_HDMI_CEC_ENABLE BIT(11)
+#define SUN4I_HDMI_CEC_TX BIT(9)
+#define SUN4I_HDMI_CEC_RX BIT(8)
+
#define SUN4I_HDMI_PKT_CTRL_REG(n) (0x2f0 + (4 * (n)))
#define SUN4I_HDMI_PKT_CTRL_TYPE(n, t) ((t) << (((n) % 4) * 4))
@@ -96,6 +103,7 @@
#define SUN4I_HDMI_DDC_CTRL_ENABLE BIT(31)
#define SUN4I_HDMI_DDC_CTRL_START_CMD BIT(30)
#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK BIT(8)
+#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE (1 << 8)
#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ (0 << 8)
#define SUN4I_HDMI_DDC_CTRL_RESET BIT(0)
@@ -105,14 +113,34 @@
#define SUN4I_HDMI_DDC_ADDR_OFFSET(off) (((off) & 0xff) << 8)
#define SUN4I_HDMI_DDC_ADDR_SLAVE(addr) ((addr) & 0xff)
+#define SUN4I_HDMI_DDC_INT_STATUS_REG 0x50c
+#define SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION BIT(7)
+#define SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW BIT(6)
+#define SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW BIT(5)
+#define SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST BIT(4)
+#define SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR BIT(3)
+#define SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR BIT(2)
+#define SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR BIT(1)
+#define SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE BIT(0)
+
#define SUN4I_HDMI_DDC_FIFO_CTRL_REG 0x510
#define SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR BIT(31)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(n) (((n) & 0xf) << 4)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK GENMASK(7, 4)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX (BIT(4) - 1)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(n) ((n) & 0xf)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK GENMASK(3, 0)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MAX (BIT(4) - 1)
#define SUN4I_HDMI_DDC_FIFO_DATA_REG 0x518
+
#define SUN4I_HDMI_DDC_BYTE_COUNT_REG 0x51c
+#define SUN4I_HDMI_DDC_BYTE_COUNT_MAX (BIT(10) - 1)
#define SUN4I_HDMI_DDC_CMD_REG 0x520
#define SUN4I_HDMI_DDC_CMD_EXPLICIT_EDDC_READ 6
+#define SUN4I_HDMI_DDC_CMD_IMPLICIT_READ 5
+#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
@@ -146,12 +174,16 @@ struct sun4i_hdmi {
struct clk *ddc_clk;
struct clk *tmds_clk;
+ struct i2c_adapter *i2c;
+
struct sun4i_drv *drv;
bool hdmi_monitor;
+ struct cec_adapter *cec_adap;
};
int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk);
int sun4i_tmds_create(struct sun4i_hdmi *hdmi);
+int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi);
#endif /* _SUN4I_HDMI_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index f5d0d6bd1084..9ea6cd5a1370 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -29,8 +29,6 @@
#include "sun4i_hdmi.h"
#include "sun4i_tcon.h"
-#define DDC_SEGMENT_ADDR 0x30
-
static inline struct sun4i_hdmi *
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
{
@@ -184,93 +182,13 @@ static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int sun4i_hdmi_read_sub_block(struct sun4i_hdmi *hdmi,
- unsigned int blk, unsigned int offset,
- u8 *buf, unsigned int count)
-{
- unsigned long reg;
- int i;
-
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
- writel(reg | SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
-
- writel(SUN4I_HDMI_DDC_ADDR_SEGMENT(offset >> 8) |
- SUN4I_HDMI_DDC_ADDR_EDDC(DDC_SEGMENT_ADDR << 1) |
- SUN4I_HDMI_DDC_ADDR_OFFSET(offset) |
- SUN4I_HDMI_DDC_ADDR_SLAVE(DDC_ADDR),
- hdmi->base + SUN4I_HDMI_DDC_ADDR_REG);
-
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
- writel(reg | SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR,
- hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
-
- writel(count, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG);
- writel(SUN4I_HDMI_DDC_CMD_EXPLICIT_EDDC_READ,
- hdmi->base + SUN4I_HDMI_DDC_CMD_REG);
-
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
-
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
- 100, 100000))
- return -EIO;
-
- for (i = 0; i < count; i++)
- buf[i] = readb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG);
-
- return 0;
-}
-
-static int sun4i_hdmi_read_edid_block(void *data, u8 *buf, unsigned int blk,
- size_t length)
-{
- struct sun4i_hdmi *hdmi = data;
- int retry = 2, i;
-
- do {
- for (i = 0; i < length; i += SUN4I_HDMI_DDC_FIFO_SIZE) {
- unsigned char offset = blk * EDID_LENGTH + i;
- unsigned int count = min((unsigned int)SUN4I_HDMI_DDC_FIFO_SIZE,
- length - i);
- int ret;
-
- ret = sun4i_hdmi_read_sub_block(hdmi, blk, offset,
- buf + i, count);
- if (ret)
- return ret;
- }
- } while (!drm_edid_block_valid(buf, blk, true, NULL) && (retry--));
-
- return 0;
-}
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- unsigned long reg;
struct edid *edid;
int ret;
- /* Reset i2c controller */
- writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_RESET),
- 100, 2000))
- return -EIO;
-
- writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE |
- SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE,
- hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
-
- clk_prepare_enable(hdmi->ddc_clk);
- clk_set_rate(hdmi->ddc_clk, 100000);
-
- edid = drm_do_get_edid(connector, sun4i_hdmi_read_edid_block, hdmi);
+ edid = drm_get_edid(connector, hdmi->i2c);
if (!edid)
return 0;
@@ -279,11 +197,10 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
hdmi->hdmi_monitor ? "an HDMI" : "a DVI");
drm_mode_connector_update_edid_property(connector, edid);
+ cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
- clk_disable_unprepare(hdmi->ddc_clk);
-
return ret;
}
@@ -299,8 +216,10 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000))
+ 0, 500000)) {
+ cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
+ }
return connector_status_connected;
}
@@ -314,6 +233,40 @@ static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
+#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
+static bool sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
+{
+ struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
+
+ return readl(hdmi->base + SUN4I_HDMI_CEC) & SUN4I_HDMI_CEC_RX;
+}
+
+static void sun4i_hdmi_cec_pin_low(struct cec_adapter *adap)
+{
+ struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
+
+ /* Start driving the CEC pin low */
+ writel(SUN4I_HDMI_CEC_ENABLE, hdmi->base + SUN4I_HDMI_CEC);
+}
+
+static void sun4i_hdmi_cec_pin_high(struct cec_adapter *adap)
+{
+ struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
+
+ /*
+ * Stop driving the CEC pin; the pull-up will take over
+ * unless another CEC device is driving the pin low.
+ */
+ writel(0, hdmi->base + SUN4I_HDMI_CEC);
+}
+
+static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
+ .read = sun4i_hdmi_cec_pin_read,
+ .low = sun4i_hdmi_cec_pin_low,
+ .high = sun4i_hdmi_cec_pin_high,
+};
+#endif
+
static int sun4i_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -406,9 +359,9 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
SUN4I_HDMI_PLL_CTRL_PLL_EN;
writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
- ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk);
+ ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
- dev_err(dev, "Couldn't create the DDC clock\n");
+ dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
return ret;
}
@@ -421,13 +374,26 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
- return ret;
+ goto err_del_i2c_adapter;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
- if (!hdmi->encoder.possible_crtcs)
- return -EPROBE_DEFER;
+ if (!hdmi->encoder.possible_crtcs) {
+ ret = -EPROBE_DEFER;
+ goto err_del_i2c_adapter;
+ }
+
+#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
+ hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
+ hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
+ ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
+ if (ret < 0)
+ goto err_cleanup_connector;
+ writel(readl(hdmi->base + SUN4I_HDMI_CEC) & ~SUN4I_HDMI_CEC_TX,
+ hdmi->base + SUN4I_HDMI_CEC);
+#endif
drm_connector_helper_add(&hdmi->connector,
&sun4i_hdmi_connector_helper_funcs);
@@ -444,12 +410,18 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
+ ret = cec_register_adapter(hdmi->cec_adap, dev);
+ if (ret < 0)
+ goto err_cleanup_connector;
drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
err_cleanup_connector:
+ cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
+err_del_i2c_adapter:
+ i2c_del_adapter(hdmi->i2c);
return ret;
}
@@ -458,8 +430,10 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
{
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
+ cec_unregister_adapter(hdmi->cec_adap);
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
+ i2c_del_adapter(hdmi->i2c);
}
static const struct component_ops sun4i_hdmi_ops = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
new file mode 100644
index 000000000000..2e42d09ab42e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2017 Jonathan Liu <net147@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+
+#include "sun4i_hdmi.h"
+
+#define SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK ( \
+ SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION | \
+ SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW | \
+ SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW | \
+ SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR | \
+ SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR | \
+ SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR \
+)
+
+/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */
+#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX
+/* FIFO request bit is set when FIFO level is below TX_THRESHOLD during write */
+#define TX_THRESHOLD 1
+
+static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
+{
+ /*
+ * 1 byte takes 9 clock cycles (8 bits + 1 ACK) = 90 us for 100 kHz
+ * clock. As clock rate is fixed, just round it up to 100 us.
+ */
+ const unsigned long byte_time_us = 100;
+ const u32 mask = SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
+ SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE;
+ u32 reg;
+
+ /* Limit transfer length by FIFO threshold */
+ len = min_t(int, len, read ? (RX_THRESHOLD + 1) :
+ (SUN4I_HDMI_DDC_FIFO_SIZE - TX_THRESHOLD + 1));
+
+ /* Wait until error, FIFO request bit set or transfer complete */
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG, reg,
+ reg & mask, len * byte_time_us, 100000))
+ return -ETIMEDOUT;
+
+ if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK)
+ return -EIO;
+
+ if (read)
+ readsb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+ else
+ writesb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+
+ /* Clear FIFO request bit */
+ writel(SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST,
+ hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+
+ return len;
+}
+
+static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
+{
+ int i, len;
+ u32 reg;
+
+ /* Set FIFO direction */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
+ reg |= (msg->flags & I2C_M_RD) ?
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
+ writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+
+ /* Set I2C address */
+ writel(SUN4I_HDMI_DDC_ADDR_SLAVE(msg->addr),
+ hdmi->base + SUN4I_HDMI_DDC_ADDR_REG);
+
+ /* Set FIFO RX/TX thresholds and clear FIFO */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
+ reg |= SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR;
+ reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK;
+ reg |= SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(RX_THRESHOLD);
+ reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK;
+ reg |= SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(TX_THRESHOLD);
+ writel(reg, hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG,
+ reg,
+ !(reg & SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR),
+ 100, 2000))
+ return -EIO;
+
+ /* Set transfer length */
+ writel(msg->len, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG);
+
+ /* Set command */
+ writel(msg->flags & I2C_M_RD ?
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE,
+ hdmi->base + SUN4I_HDMI_DDC_CMD_REG);
+
+ /* Clear interrupt status bits */
+ writel(SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
+ SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE,
+ hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+
+ /* Start command */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
+ hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+
+ /* Transfer bytes */
+ for (i = 0; i < msg->len; i += len) {
+ len = fifo_transfer(hdmi, msg->buf + i, msg->len - i,
+ msg->flags & I2C_M_RD);
+ if (len <= 0)
+ return len;
+ }
+
+ /* Wait for command to finish */
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG,
+ reg,
+ !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
+ 100, 100000))
+ return -EIO;
+
+ /* Check for errors */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) ||
+ !(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct sun4i_hdmi *hdmi = i2c_get_adapdata(adap);
+ u32 reg;
+ int err, i, ret = num;
+
+ for (i = 0; i < num; i++) {
+ if (!msgs[i].len)
+ return -EINVAL;
+ if (msgs[i].len > SUN4I_HDMI_DDC_BYTE_COUNT_MAX)
+ return -EINVAL;
+ }
+
+ /* Reset I2C controller */
+ writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET,
+ hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
+ !(reg & SUN4I_HDMI_DDC_CTRL_RESET),
+ 100, 2000))
+ return -EIO;
+
+ writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE |
+ SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE,
+ hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
+
+ clk_prepare_enable(hdmi->ddc_clk);
+ clk_set_rate(hdmi->ddc_clk, 100000);
+
+ for (i = 0; i < num; i++) {
+ err = xfer_msg(hdmi, &msgs[i]);
+ if (err) {
+ ret = err;
+ break;
+ }
+ }
+
+ clk_disable_unprepare(hdmi->ddc_clk);
+ return ret;
+}
+
+static u32 sun4i_hdmi_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = {
+ .master_xfer = sun4i_hdmi_i2c_xfer,
+ .functionality = sun4i_hdmi_i2c_func,
+};
+
+int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ int ret = 0;
+
+ ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk);
+ if (ret)
+ return ret;
+
+ adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DDC;
+ adap->algo = &sun4i_hdmi_i2c_algorithm;
+ strlcpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ hdmi->i2c = adap;
+
+ return ret;
+}
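Since the new bus is a regular I2C adapter, consumers are not limited to drm_get_edid(); any standard transfer works. A hypothetical sketch of reading a single EDID byte through it (example_read_edid_byte and the hard-coded 0x50 DDC slave address are illustrative only):

	static int example_read_edid_byte(struct i2c_adapter *adap,
					  u8 offset, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
			{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
		};
		int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));

		if (ret < 0)
			return ret;
		return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
	}

Note that sun4i_hdmi_i2c_xfer() rejects zero-length messages up front, so the sketch keeps both messages at least one byte long.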
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index d45f3a1a0a29..7bddf12548d3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -25,12 +25,6 @@ struct sun4i_plane_desc {
uint32_t nformats;
};
-static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- return 0;
-}
-
static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -52,8 +46,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
sun4i_backend_layer_enable(backend, layer->id, true);
}
-static struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
- .atomic_check = sun4i_backend_layer_atomic_check,
+static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 550bb262943f..7cd7090ad63a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -119,7 +119,7 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs sun4i_rgb_con_funcs = {
+static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_rgb_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
@@ -127,13 +127,6 @@ static struct drm_connector_funcs sun4i_rgb_con_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int sun4i_rgb_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
@@ -181,7 +174,6 @@ static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
}
static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
- .atomic_check = sun4i_rgb_atomic_check,
.mode_set = sun4i_rgb_encoder_mode_set,
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index e3c50ecdcd04..552c88ec16be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -194,8 +194,6 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
/* Mode Related Controls */
-void sun4i_tcon_switch_interlace(struct sun4i_tcon *tcon,
- bool enable);
void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
struct drm_encoder *encoder);
void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 7b45ac9383ea..050cfd43c7a0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -341,13 +341,6 @@ static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
}
-static int sun4i_tv_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
static void sun4i_tv_disable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
@@ -489,7 +482,6 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
}
static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
- .atomic_check = sun4i_tv_atomic_check,
.disable = sun4i_tv_disable,
.enable = sun4i_tv_enable,
.mode_set = sun4i_tv_mode_set,
@@ -545,7 +537,7 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
+static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_tv_comp_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 2db29d67193d..dc58ab140151 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,6 +3,7 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
+ depends on OF
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 6af3a9ad6565..8927784396e8 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -17,4 +17,6 @@ tegra-drm-y := \
falcon.o \
vic.o
+tegra-drm-y += trace.o
+
obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2fde44c3a1b3..e4da041ba89b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -25,6 +25,7 @@
#include "dpaux.h"
#include "drm.h"
+#include "trace.h"
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
@@ -65,14 +66,19 @@ static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
}
static inline u32 tegra_dpaux_readl(struct tegra_dpaux *dpaux,
- unsigned long offset)
+ unsigned int offset)
{
- return readl(dpaux->regs + (offset << 2));
+ u32 value = readl(dpaux->regs + (offset << 2));
+
+ trace_dpaux_readl(dpaux->dev, offset, value);
+
+ return value;
}
static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
- u32 value, unsigned long offset)
+ u32 value, unsigned int offset)
{
+ trace_dpaux_writel(dpaux->dev, offset, value);
writel(value, dpaux->regs + (offset << 2));
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3ba659a5940d..597d563d636a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -306,8 +306,6 @@ host1x_bo_lookup(struct drm_file *file, u32 handle)
if (!gem)
return NULL;
- drm_gem_object_unreference_unlocked(gem);
-
bo = to_tegra_bo(gem);
return &bo->base;
}
@@ -396,8 +394,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
(void __user *)(uintptr_t)args->waitchks;
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_gem_object **refs;
struct host1x_syncpt *sp;
struct host1x_job *job;
+ unsigned int num_refs;
int err;
/* We don't yet support other than one syncpt_incr struct per submit */
@@ -419,6 +419,21 @@ int tegra_drm_submit(struct tegra_drm_context *context,
job->class = context->client->base.class;
job->serialize = true;
+ /*
+ * Track referenced BOs so that they can be unreferenced after the
+ * submission is complete.
+ */
+ num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;
+
+ refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
+ if (!refs) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /* reuse as an iterator later */
+ num_refs = 0;
+
while (num_cmdbufs) {
struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo;
@@ -447,6 +462,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
obj = host1x_to_tegra_bo(bo);
+ refs[num_refs++] = &obj->gem;
/*
* Gather buffer base address must be 4-bytes aligned,
@@ -476,6 +492,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
reloc = &job->relocarray[num_relocs];
obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
+ refs[num_refs++] = &obj->gem;
/*
* The unaligned cmdbuf offset will cause an unaligned write
@@ -489,6 +506,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
obj = host1x_to_tegra_bo(reloc->target.bo);
+ refs[num_refs++] = &obj->gem;
if (reloc->target.offset >= obj->gem.size) {
err = -EINVAL;
@@ -508,6 +526,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
obj = host1x_to_tegra_bo(wait->bo);
+ refs[num_refs++] = &obj->gem;
/*
* The unaligned offset will cause an unaligned write during
@@ -547,17 +566,20 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
err = host1x_job_submit(job);
- if (err)
- goto fail_submit;
+ if (err) {
+ host1x_job_unpin(job);
+ goto fail;
+ }
args->fence = job->syncpt_end;
- host1x_job_put(job);
- return 0;
-
-fail_submit:
- host1x_job_unpin(job);
fail:
+ while (num_refs--)
+ drm_gem_object_put_unlocked(refs[num_refs]);
+
+ kfree(refs);
+
+put:
host1x_job_put(job);
return err;
}
@@ -593,7 +615,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -860,7 +882,7 @@ static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
bo->tiling.mode = mode;
bo->tiling.value = value;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -900,7 +922,7 @@ static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
break;
}
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return err;
}
@@ -925,7 +947,7 @@ static int tegra_gem_set_flags(struct drm_device *drm, void *data,
if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -947,7 +969,7 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
if (bo->flags & TEGRA_BO_BOTTOM_UP)
args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -955,20 +977,34 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};
@@ -1035,9 +1071,11 @@ static int tegra_debugfs_iova(struct seq_file *s, void *data)
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
- mutex_lock(&tegra->mm_lock);
- drm_mm_print(&tegra->mm, &p);
- mutex_unlock(&tegra->mm_lock);
+ if (tegra->domain) {
+ mutex_lock(&tegra->mm_lock);
+ drm_mm_print(&tegra->mm, &p);
+ mutex_unlock(&tegra->mm_lock);
+ }
return 0;
}
@@ -1057,7 +1095,7 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ DRIVER_ATOMIC | DRIVER_RENDER,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
@@ -1077,8 +1115,6 @@ static struct drm_driver tegra_drm_driver = {
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
- .dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 6d6da01282f3..063f5d397526 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -23,6 +23,7 @@
#include <drm/drm_fixed.h>
#include "gem.h"
+#include "trace.h"
struct reset_control;
@@ -172,14 +173,19 @@ static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
}
static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
- unsigned long offset)
+ unsigned int offset)
{
+ trace_dc_writel(dc->dev, offset, value);
writel(value, dc->regs + (offset << 2));
}
-static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
{
- return readl(dc->regs + (offset << 2));
+ u32 value = readl(dc->regs + (offset << 2));
+
+ trace_dc_readl(dc->dev, offset, value);
+
+ return value;
}
struct tegra_dc_window {
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index e4b5aedfdbd4..046649ec9441 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "dsi.h"
#include "mipi-phy.h"
+#include "trace.h"
struct tegra_dsi_state {
struct drm_connector_state base;
@@ -105,15 +106,20 @@ static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi)
return to_dsi_state(dsi->output.connector.state);
}
-static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned long reg)
+static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned int offset)
{
- return readl(dsi->regs + (reg << 2));
+ u32 value = readl(dsi->regs + (offset << 2));
+
+ trace_dsi_readl(dsi->dev, offset, value);
+
+ return value;
}
static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value,
- unsigned long reg)
+ unsigned int offset)
{
- writel(value, dsi->regs + (reg << 2));
+ trace_dsi_writel(dsi->dev, offset, value);
+ writel(value, dsi->regs + (offset << 2));
}
static int tegra_dsi_show_regs(struct seq_file *s, void *data)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 25acb73ee728..80540c1c66dc 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -88,7 +88,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
if (bo->pages)
vunmap(bo->vaddr);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
}
}
@@ -195,7 +195,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
unreference:
while (i--)
- drm_gem_object_unreference_unlocked(&planes[i]->gem);
+ drm_gem_object_put_unlocked(&planes[i]->gem);
return ERR_PTR(err);
}
@@ -242,7 +242,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return PTR_ERR(info);
}
@@ -251,7 +251,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
err = PTR_ERR(fbdev->fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
err);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return PTR_ERR(fbdev->fb);
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7a39a355678a..ab1e53d434e8 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -24,7 +24,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- drm_gem_object_unreference_unlocked(&obj->gem);
+ drm_gem_object_put_unlocked(&obj->gem);
}
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
@@ -95,7 +95,7 @@ static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- drm_gem_object_reference(&obj->gem);
+ drm_gem_object_get(&obj->gem);
return bo;
}
@@ -325,7 +325,7 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
return ERR_PTR(err);
}
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return bo;
}
@@ -423,27 +423,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
return 0;
}
-int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(file, handle);
- if (!gem) {
- dev_err(drm->dev, "failed to lookup GEM object\n");
- return -EINVAL;
- }
-
- bo = to_tegra_bo(gem);
-
- *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
-
- drm_gem_object_unreference_unlocked(gem);
-
- return 0;
-}
-
static int tegra_bo_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -481,30 +460,28 @@ const struct vm_operations_struct tegra_bo_vm_ops = {
.close = drm_gem_vm_close,
};
-int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+static int tegra_gem_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
- int ret;
-
- ret = drm_gem_mmap(file, vma);
- if (ret)
- return ret;
-
- gem = vma->vm_private_data;
- bo = to_tegra_bo(gem);
+ struct tegra_bo *bo = to_tegra_bo(gem);
if (!bo->pages) {
unsigned long vm_pgoff = vma->vm_pgoff;
+ int err;
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
+ * and set the vm_pgoff (used as a fake buffer offset by DRM)
+ * to 0 as we want to map the whole buffer.
+ */
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
gem->size);
- if (ret) {
+ if (err < 0) {
drm_gem_vm_close(vma);
- return ret;
+ return err;
}
vma->vm_pgoff = vm_pgoff;
@@ -520,6 +497,20 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem;
+ int err;
+
+ err = drm_gem_mmap(file, vma);
+ if (err < 0)
+ return err;
+
+ gem = vma->vm_private_data;
+
+ return tegra_gem_mmap(gem, vma);
+}
+
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
@@ -603,7 +594,14 @@ static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
- return -EINVAL;
+ struct drm_gem_object *gem = buf->priv;
+ int err;
+
+ err = drm_gem_mmap_obj(gem, gem->size, vma);
+ if (err < 0)
+ return err;
+
+ return tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
@@ -654,7 +652,7 @@ struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
struct drm_gem_object *gem = buf->priv;
if (gem->dev == drm) {
- drm_gem_object_reference(gem);
+ drm_gem_object_get(gem);
return gem;
}
}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 8b32a6fd586d..8eb9fd24ef0e 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -67,8 +67,6 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
-int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- u32 handle, u64 *offset);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index a621b0da4092..5b9d83b71943 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -24,6 +24,7 @@
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
+#include "trace.h"
#define HDMI_ELD_BUFFER_SIZE 96
@@ -100,14 +101,19 @@ enum {
};
static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi,
- unsigned long offset)
+ unsigned int offset)
{
- return readl(hdmi->regs + (offset << 2));
+ u32 value = readl(hdmi->regs + (offset << 2));
+
+ trace_hdmi_readl(hdmi->dev, offset, value);
+
+ return value;
}
static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value,
- unsigned long offset)
+ unsigned int offset)
{
+ trace_hdmi_writel(hdmi->dev, offset, value);
writel(value, hdmi->regs + (offset << 2));
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e0642d05a8d3..7ab1d1dc7cd7 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -26,6 +26,7 @@
#include "dc.h"
#include "drm.h"
#include "sor.h"
+#include "trace.h"
#define SOR_REKEY 0x38
@@ -232,14 +233,19 @@ static inline struct tegra_sor *to_sor(struct tegra_output *output)
return container_of(output, struct tegra_sor, output);
}
-static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset)
+static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned int offset)
{
- return readl(sor->regs + (offset << 2));
+ u32 value = readl(sor->regs + (offset << 2));
+
+ trace_sor_readl(sor->dev, offset, value);
+
+ return value;
}
static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
- unsigned long offset)
+ unsigned int offset)
{
+ trace_sor_writel(sor->dev, offset, value);
writel(value, sor->regs + (offset << 2));
}
diff --git a/drivers/gpu/drm/tegra/trace.c b/drivers/gpu/drm/tegra/trace.c
new file mode 100644
index 000000000000..006f65c72a34
--- /dev/null
+++ b/drivers/gpu/drm/tegra/trace.c
@@ -0,0 +1,2 @@
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h
new file mode 100644
index 000000000000..e9b7cdad5c4c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/trace.h
@@ -0,0 +1,68 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra
+
+#if !defined(DRM_TEGRA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define DRM_TEGRA_TRACE_H 1
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(register_access,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value),
+ TP_STRUCT__entry(
+ __field(struct device *, dev)
+ __field(unsigned int, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("%s %04x %08x", dev_name(__entry->dev), __entry->offset,
+ __entry->value)
+);
+
+DEFINE_EVENT(register_access, dc_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dc_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, hdmi_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, hdmi_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, dsi_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dsi_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, dpaux_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dpaux_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, sor_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, sor_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+#endif /* DRM_TEGRA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
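All five accessor pairs above funnel into the single register_access event class, so extending tracing to another block costs only another pair of DEFINE_EVENT() lines. A hypothetical pair for the VIC (not part of this patch) would look like:

	DEFINE_EVENT(register_access, vic_writel,
		TP_PROTO(struct device *dev, unsigned int offset, u32 value),
		TP_ARGS(dev, offset, value));
	DEFINE_EVENT(register_access, vic_readl,
		TP_PROTO(struct device *dev, unsigned int offset, u32 value),
		TP_ARGS(dev, offset, value));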
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 47cb1aaa58b1..2448229fa653 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -258,12 +258,16 @@ static const struct tegra_drm_client_ops vic_ops = {
.submit = tegra_drm_submit,
};
+#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"
+
static const struct vic_config vic_t124_config = {
- .firmware = "nvidia/tegra124/vic03_ucode.bin",
+ .firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
};
+#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
+
static const struct vic_config vic_t210_config = {
- .firmware = "nvidia/tegra210/vic04_ucode.bin",
+ .firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
};
static const struct of_device_id vic_match[] = {
@@ -394,3 +398,10 @@ struct platform_driver tegra_vic_driver = {
.probe = vic_probe,
.remove = vic_remove,
};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index f17c3caceab2..2e790e7dced5 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -32,3 +32,13 @@ config TINYDRM_REPAPER
2.71" TFT EPD Panel (E2271CS021)
If M is selected the module will be called repaper.
+
+config TINYDRM_ST7586
+ tristate "DRM support for Sitronix ST7586 display panels"
+ depends on DRM_TINYDRM && SPI
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following Sitronix ST7586 panels:
+ * LEGO MINDSTORMS EV3
+
+ If M is selected the module will be called st7586.
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 95bb4d4fa785..0c184bd1bb59 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
+obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index 75808bb84c9a..bd6cce093a85 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -185,7 +185,9 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
/**
* tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
* @dst: 8-bit grayscale destination buffer
+ * @vaddr: XRGB8888 source buffer
* @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
*
* DRM doesn't have native monochrome or grayscale support.
* Such drivers can announce the commonly supported XR24 format to userspace
@@ -195,41 +197,31 @@ EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
* where 1 means foreground color and 0 background color.
*
* ITU BT.601 is used for the RGB -> luma (brightness) conversion.
- *
- * Returns:
- * Zero on success, negative error code on failure.
*/
-int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb)
+void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
- unsigned int x, y, pitch = fb->pitches[0];
- int ret = 0;
+ unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
+ unsigned int x, y;
void *buf;
u32 *src;
if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
- return -EINVAL;
+ return;
/*
* The CMA memory is write-combined, so reads are uncached.
* Speed up by fetching one line at a time.
*/
- buf = kmalloc(pitch, GFP_KERNEL);
+ buf = kmalloc(len, GFP_KERNEL);
if (!buf)
- return -ENOMEM;
-
- if (import_attach) {
- ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
- DMA_FROM_DEVICE);
- if (ret)
- goto err_free;
- }
+ return;
- for (y = 0; y < fb->height; y++) {
- src = cma_obj->vaddr + (y * pitch);
- memcpy(buf, src, pitch);
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
src = buf;
- for (x = 0; x < fb->width; x++) {
+ for (x = clip->x1; x < clip->x2; x++) {
u8 r = (*src & 0x00ff0000) >> 16;
u8 g = (*src & 0x0000ff00) >> 8;
u8 b = *src & 0x000000ff;
@@ -240,13 +232,7 @@ int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb)
}
}
- if (import_attach)
- ret = dma_buf_end_cpu_access(import_attach->dmabuf,
- DMA_FROM_DEVICE);
-err_free:
kfree(buf);
-
- return ret;
}
EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
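The helper's unchanged inner loop reduces each XRGB8888 pixel to a BT.601-weighted luma value. A minimal sketch of that reduction with small integer weights (the exact coefficients live in context lines not shown by this hunk, so treat this as an approximation):

	static inline u8 example_xrgb8888_to_luma(u32 pix)
	{
		u8 r = (pix & 0x00ff0000) >> 16;
		u8 g = (pix & 0x0000ff00) >> 8;
		u8 b = pix & 0x000000ff;

		/* Y ~= 0.299 R + 0.587 G + 0.114 B, roughly (3R + 6G + B) / 10 */
		return (3 * r + 6 * g + b) / 10;
	}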
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index f224b54a30f6..177e9d861001 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -56,7 +56,7 @@ static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
static enum drm_connector_status
tinydrm_connector_detect(struct drm_connector *connector, bool force)
{
- if (drm_device_is_unplugged(connector->dev))
+ if (drm_dev_is_unplugged(connector->dev))
return connector_status_disconnected;
return connector->status;
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 3343d3f15a90..30dc97b3ff21 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -18,6 +18,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -525,11 +526,20 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
struct drm_clip_rect *clips,
unsigned int num_clips)
{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
struct tinydrm_device *tdev = fb->dev->dev_private;
struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct drm_clip_rect clip;
u8 *buf = NULL;
int ret = 0;
+ /* repaper can't do partial updates */
+ clip.x1 = 0;
+ clip.x2 = fb->width;
+ clip.y1 = 0;
+ clip.y2 = fb->height;
+
mutex_lock(&tdev->dirty_lock);
if (!epd->enabled)
@@ -550,9 +560,21 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
goto out_unlock;
}
- ret = tinydrm_xrgb8888_to_gray8(buf, fb);
- if (ret)
- goto out_unlock;
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_unlock;
+ }
+
+ tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
+
+ if (import_attach) {
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_unlock;
+ }
repaper_gray8_to_mono_reversed(buf, fb->width, fb->height);
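The bracketing added here is the standard pattern for CPU access to an imported dma-buf: begin access, read, end access. Moving it out of the conversion helper and into the driver means a driver that chains conversions (st7586, added below, calls the gray8 helper from inside its own packing step) pays for a single begin/end pair. Condensed from the hunk above:

	if (import_attach) {
		ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
					       DMA_FROM_DEVICE);
		if (ret)
			goto out_unlock;
	}

	/* ... CPU reads of the imported backing memory ... */

	if (import_attach)
		ret = dma_buf_end_cpu_access(import_attach->dmabuf,
					     DMA_FROM_DEVICE);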
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
new file mode 100644
index 000000000000..b439956a07f4
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -0,0 +1,428 @@
+/*
+ * DRM driver for Sitronix ST7586 panels
+ *
+ * Copyright 2017 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
+/* controller-specific commands */
+#define ST7586_DISP_MODE_GRAY 0x38
+#define ST7586_DISP_MODE_MONO 0x39
+#define ST7586_ENABLE_DDRAM 0x3a
+#define ST7586_SET_DISP_DUTY 0xb0
+#define ST7586_SET_PART_DISP 0xb4
+#define ST7586_SET_NLINE_INV 0xb5
+#define ST7586_SET_VOP 0xc0
+#define ST7586_SET_BIAS_SYSTEM 0xc3
+#define ST7586_SET_BOOST_LEVEL 0xc4
+#define ST7586_SET_VOP_OFFSET 0xc7
+#define ST7586_ENABLE_ANALOG 0xd0
+#define ST7586_AUTO_READ_CTRL 0xd7
+#define ST7586_OTP_RW_CTRL 0xe0
+#define ST7586_OTP_CTRL_OUT 0xe1
+#define ST7586_OTP_READ 0xe3
+
+#define ST7586_DISP_CTRL_MX BIT(6)
+#define ST7586_DISP_CTRL_MY BIT(7)
+
+/*
+ * The ST7586 controller has an unusual pixel format where 2bpp grayscale is
+ * packed 3 pixels per byte with the first two pixels using 3 bits and the 3rd
+ * pixel using only 2 bits.
+ *
+ * | D7 | D6 | D5 || | || 2bpp |
+ * | (D4) | (D3) | (D2) || D1 | D0 || GRAY |
+ * +------+------+------++------+------++------+
+ * | 1 | 1 | 1 || 1 | 1 || 0 0 | black
+ * | 1 | 0 | 0 || 1 | 0 || 0 1 | dark gray
+ * | 0 | 1 | 0 || 0 | 1 || 1 0 | light gray
+ * | 0 | 0 | 0 || 0 | 0 || 1 1 | white
+ */
+
+static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
+
+static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
+ unsigned int x, y;
+ u8 *src, *buf, val;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip);
+ src = buf;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x += 3) {
+ val = st7586_lookup[*src++ >> 6] << 5;
+ val |= st7586_lookup[*src++ >> 6] << 2;
+ val |= st7586_lookup[*src++ >> 6] >> 1;
+ *dst++ = val;
+ }
+ }
+
+ kfree(buf);
+}
+
+static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
+ void *src = cma_obj->vaddr;
+ int ret = 0;
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+ }
+
+ st7586_xrgb8888_to_gray332(dst, src, fb, clip);
+
+ if (import_attach)
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int st7586_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct tinydrm_device *tdev = fb->dev->dev_private;
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct drm_clip_rect clip;
+ int start, end;
+ int ret = 0;
+
+ mutex_lock(&tdev->dirty_lock);
+
+ if (!mipi->enabled)
+ goto out_unlock;
+
+ /* fbdev can flush even when we're not interested */
+ if (tdev->pipe.plane.fb != fb)
+ goto out_unlock;
+
+ tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
+ fb->height);
+
+ /* 3 pixels per byte, so grow clip to nearest multiple of 3 */
+ clip.x1 = rounddown(clip.x1, 3);
+ clip.x2 = roundup(clip.x2, 3);
+
+ DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
+ clip.x1, clip.x2, clip.y1, clip.y2);
+
+ ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
+ if (ret)
+ goto out_unlock;
+
+ /* Pixels are packed 3 per byte */
+ start = clip.x1 / 3;
+ end = clip.x2 / 3;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (start >> 8) & 0xFF, start & 0xFF,
+ (end >> 8) & 0xFF, (end - 1) & 0xFF);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
+ (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+
+ ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)mipi->tx_buf,
+ (end - start) * (clip.y2 - clip.y1));
+
+out_unlock:
+ mutex_unlock(&tdev->dirty_lock);
+
+ if (ret)
+ dev_err_once(fb->dev->dev, "Failed to update display: %d\n",
+ ret);
+
+ return ret;
+}
+
+static const struct drm_framebuffer_funcs st7586_fb_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+ .dirty = st7586_fb_dirty,
+};
+
+static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct drm_framebuffer *fb = pipe->plane.fb;
+ struct device *dev = tdev->drm->dev;
+ int ret;
+ u8 addr_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ mipi_dbi_hw_reset(mipi);
+ ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
+ if (ret) {
+ dev_err(dev, "Error sending command %d\n", ret);
+ return;
+ }
+
+ mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
+
+ msleep(10);
+
+ mipi_dbi_command(mipi, ST7586_OTP_READ);
+
+ msleep(20);
+
+ mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+ msleep(50);
+
+ mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00);
+ mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00);
+ mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02);
+ mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04);
+ mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d);
+ mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00);
+ mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY);
+ mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02);
+
+ switch (mipi->rotation) {
+ default:
+ addr_mode = 0x00;
+ break;
+ case 90:
+ addr_mode = ST7586_DISP_CTRL_MY;
+ break;
+ case 180:
+ addr_mode = ST7586_DISP_CTRL_MX | ST7586_DISP_CTRL_MY;
+ break;
+ case 270:
+ addr_mode = ST7586_DISP_CTRL_MX;
+ break;
+ }
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+
+ mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f);
+ mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
+
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+
+ mipi->enabled = true;
+
+ if (fb)
+ fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+}
+
+static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!mipi->enabled)
+ return;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi->enabled = false;
+}
+
+static const u32 st7586_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver, const struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
+ struct tinydrm_device *tdev = &mipi->tinydrm;
+ int ret;
+
+ mutex_init(&mipi->cmdlock);
+
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
+ ret = devm_tinydrm_init(dev, tdev, &st7586_fb_funcs, driver);
+ if (ret)
+ return ret;
+
+ ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ st7586_formats,
+ ARRAY_SIZE(st7586_formats),
+ mode, rotation);
+ if (ret)
+ return ret;
+
+ tdev->drm->mode_config.preferred_depth = 32;
+ mipi->rotation = rotation;
+
+ drm_mode_config_reset(tdev->drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ tdev->drm->mode_config.preferred_depth, rotation);
+
+ return 0;
+}
+
+static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
+ .enable = st7586_pipe_enable,
+ .disable = st7586_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode st7586_mode = {
+ TINYDRM_MODE(178, 128, 37, 27),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
+
+static struct drm_driver st7586_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .fops = &st7586_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .lastclose = tinydrm_lastclose,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "st7586",
+ .desc = "Sitronix ST7586",
+ .date = "20170801",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id st7586_of_match[] = {
+ { .compatible = "lego,ev3-lcd" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st7586_of_match);
+
+static const struct spi_device_id st7586_id[] = {
+ { "ev3-lcd", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, st7586_id);
+
+static int st7586_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct tinydrm_device *tdev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *a0;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ dev_err(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
+ if (IS_ERR(a0)) {
+ dev_err(dev, "Failed to get gpio 'a0'\n");
+ return PTR_ERR(a0);
+ }
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, a0);
+ if (ret)
+ return ret;
+
+ /* Cannot read from this controller via SPI */
+ mipi->read_commands = NULL;
+
+ /*
+ * We are using 8-bit data, so we are not actually swapping anything,
+ * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the
+ * right thing and not use 16-bit transfers (which result in swapped
+ * bytes on little-endian systems and cause out-of-order data to be
+ * sent to the display).
+ */
+ mipi->swap_bytes = true;
+
+ ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
+ &st7586_mode, rotation);
+ if (ret)
+ return ret;
+
+ tdev = &mipi->tinydrm;
+
+ ret = devm_tinydrm_register(tdev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
+ tdev->drm->driver->name, dev_name(dev),
+ spi->max_speed_hz / 1000000,
+ tdev->drm->primary->index);
+
+ return 0;
+}
+
+static void st7586_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver st7586_spi_driver = {
+ .driver = {
+ .name = "st7586",
+ .owner = THIS_MODULE,
+ .of_match_table = st7586_of_match,
+ },
+ .id_table = st7586_id,
+ .probe = st7586_probe,
+ .shutdown = st7586_shutdown,
+};
+module_spi_driver(st7586_spi_driver);
+
+MODULE_DESCRIPTION("Sitronix ST7586 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 22b57020790d..cba11f13d994 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -70,6 +70,7 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct drm_printer p = drm_debug_printer(TTM_PFX);
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
@@ -79,7 +80,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, TTM_PFX);
+ (*man->func->debug)(man, &p);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -394,14 +395,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ww_mutex_unlock (&bo->resv->lock);
}
+static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
+{
+ int r;
+
+ if (bo->resv == &bo->ttm_resv)
+ return 0;
+
+ reservation_object_init(&bo->ttm_resv);
+ BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
+
+ r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
+ if (r) {
+ reservation_object_unlock(&bo->ttm_resv);
+ reservation_object_fini(&bo->ttm_resv);
+ }
+
+ return r;
+}
+
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
struct dma_fence *fence;
int i;
- fobj = reservation_object_get_list(bo->resv);
- fence = reservation_object_get_excl(bo->resv);
+ fobj = reservation_object_get_list(&bo->ttm_resv);
+ fence = reservation_object_get_excl(&bo->ttm_resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -430,8 +450,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_cleanup_memtype_use(bo);
return;
- } else
- ttm_bo_flush_all_fences(bo);
+ }
+
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block for the BO to become idle and free it.
+ */
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_wait(bo, true, true);
+ ttm_bo_cleanup_memtype_use(bo);
+ return;
+ }
+ ttm_bo_flush_all_fences(bo);
/*
* Make NO_EVICT bos immediately available to
@@ -443,6 +474,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
__ttm_bo_unreserve(bo);
}
@@ -471,17 +504,25 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
+ struct reservation_object *resv;
int ret;
- ret = ttm_bo_wait(bo, false, true);
+ if (unlikely(list_empty(&bo->ddestroy)))
+ resv = bo->resv;
+ else
+ resv = &bo->ttm_resv;
+
+ if (reservation_object_test_signaled_rcu(resv, true))
+ ret = 0;
+ else
+ ret = -EBUSY;
if (ret && !no_wait_gpu) {
long lret;
ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
- lret = reservation_object_wait_timeout_rcu(bo->resv,
- true,
+ lret = reservation_object_wait_timeout_rcu(resv, true,
interruptible,
30 * HZ);
@@ -505,13 +546,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&glob->lru_lock);
return 0;
}
-
- /*
- * remove sync_obj with ttm_bo_wait, the wait should be
- * finished, and no new wait object should have been added.
- */
- ret = ttm_bo_wait(bo, false, true);
- WARN_ON(ret);
}
if (ret || unlikely(list_empty(&bo->ddestroy))) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 90a6c0b03afc..a7c232dc39cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -136,13 +136,12 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&rman->lock);
- drm_mm_print(&rman->mm, &p);
+ drm_mm_print(&rman->mm, printer);
spin_unlock(&rman->lock);
}
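The debug hook now takes a drm_printer instead of a bare prefix string, so the caller chooses the sink and the manager just prints into it. Consumer-side sketch (the extra drm_printf() line is illustrative):

	struct drm_printer p = drm_debug_printer(TTM_PFX);

	drm_printf(&p, "mem_type %d:\n", mem_type);
	(*man->func->debug)(man, &p);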
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index eeddc1e48409..871599826773 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
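The one-line fix above corrects the accounting in the partial-failure path: the pages to be counted still sit on the local new_pages list and have not been spliced onto pool->list yet, so walking the pool tallied the wrong list. Count-then-splice, condensed (the final npages credit mirrors what the surrounding function does with cpages):

	unsigned int cpages = 0;
	struct page *p;

	/* count only the pages this fill attempt actually allocated */
	list_for_each_entry(p, &new_pages, lru)
		++cpages;

	/* then hand them to the pool and credit the count */
	list_splice(&new_pages, &pool->list);
	pool->npages += cpages;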
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index d2f57c52f7db..9f9a49748d17 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -96,7 +96,7 @@ static int udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- if (drm_device_is_unplugged(connector->dev))
+ if (drm_dev_is_unplugged(connector->dev))
return connector_status_disconnected;
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 0f02e1acf0ba..31421b6b586e 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -54,7 +54,6 @@ static struct drm_driver driver = {
.dumb_create = udl_dumb_create,
.dumb_map_offset = udl_gem_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -102,7 +101,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_disable(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
- drm_unplug_dev(dev);
+ drm_dev_unplug(dev);
}
/*
@@ -112,7 +111,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
* which is compatible with all known USB 2.0 era graphics chips and firmware,
* but allows DisplayLink to increment those for any future incompatible chips
*/
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
.bInterfaceSubClass = 0x00,
.bInterfaceProtocol = 0x00,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a5c54dc60def..b7ca90db4e80 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -198,7 +198,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct udl_device *udl = dev->dev_private;
/* If the USB device is gone, we don't accept new opens */
- if (drm_device_is_unplugged(udl->ddev))
+ if (drm_dev_is_unplugged(udl->ddev))
return -ENODEV;
ufbdev->fb_count++;
@@ -309,7 +309,7 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct udl_framebuffer *ufb = to_udl_fb(fb);
if (ufb->obj)
- drm_gem_object_unreference_unlocked(&ufb->obj->base);
+ drm_gem_object_put_unlocked(&ufb->obj->base);
drm_framebuffer_cleanup(fb);
kfree(ufb);
@@ -403,7 +403,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
return ret;
out_gfree:
- drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
@@ -419,7 +419,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ufbdev->helper);
drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
}
int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index db9ceceba30e..dee6bd9a3dd1 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_put_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
@@ -234,7 +234,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
- drm_gem_object_unreference(&gobj->base);
+ drm_gem_object_put(&gobj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index b24dd8685590..3afdbf4bc10b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -366,7 +366,7 @@ int vc4_dumb_create(struct drm_file *file_priv,
return PTR_ERR(bo);
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -482,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) {
- DRM_ERROR("Attempting to export shader BO\n");
+ DRM_DEBUG("Attempting to export shader BO\n");
return ERR_PTR(-EINVAL);
}
@@ -503,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
bo = to_vc4_bo(gem_obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+ DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
return -EINVAL;
}
@@ -528,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+ DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
return -EINVAL;
}
@@ -540,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj)
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) {
- DRM_ERROR("mmaping of shader BOs not allowed.\n");
+ DRM_DEBUG("mmapping of shader BOs not allowed.\n");
return ERR_PTR(-EINVAL);
}
@@ -581,7 +581,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(bo);
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -594,14 +594,14 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
}
/* The mmap offset was set up at BO allocation time. */
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
@@ -657,7 +657,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
fail:
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -698,13 +698,13 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
bo->t_format = t_format;
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
@@ -729,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
@@ -739,7 +739,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
else
args->modifier = DRM_FORMAT_MOD_NONE;
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
@@ -830,7 +830,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
ret = -ENOMEM;
mutex_unlock(&vc4->bo_lock);
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 664a55b45af0..ce1e3b9e14c9 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -763,7 +763,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
}
drm_crtc_vblank_put(crtc);
- drm_framebuffer_unreference(flip_state->fb);
+ drm_framebuffer_put(flip_state->fb);
kfree(flip_state);
up(&vc4->async_modeset);
@@ -792,7 +792,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
if (!flip_state)
return -ENOMEM;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
flip_state->fb = fb;
flip_state->crtc = crtc;
flip_state->event = event;
@@ -800,7 +800,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
/* Make sure all other async modesets have landed. */
ret = down_interruptible(&vc4->async_modeset);
if (ret) {
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
kfree(flip_state);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index e8f0e1790d5e..1c96edcb302b 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -99,6 +99,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
case DRM_VC4_PARAM_SUPPORTS_ETC1:
case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
+ case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
args->value = true;
break;
default:
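Userspace probes the new capability through the existing getparam path; a sketch using libdrm's drmIoctl() wrapper, with field names taken from the vc4 uapi header:

	struct drm_vc4_get_param gp = {
		.param = DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER,
	};

	if (drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &gp) == 0 && gp.value) {
		/* kernel accepts the fixed-RCL-order submit flags */
	}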
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 629d372633e6..d1e0dc908048 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1636,14 +1636,10 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
pm_runtime_disable(dev);
- drm_bridge_remove(dsi->bridge);
vc4_dsi_encoder_destroy(dsi->encoder);
mipi_dsi_host_unregister(&dsi->dsi_host);
- clk_disable_unprepare(dsi->pll_phy_clock);
- clk_disable_unprepare(dsi->escape_clock);
-
if (dsi->port == 1)
vc4->dsi1 = NULL;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 209fccd0d3b4..d0c6bfb68c4e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -55,7 +55,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
unsigned int i;
for (i = 0; i < state->user_state.bo_count; i++)
- drm_gem_object_unreference_unlocked(state->bo[i]);
+ drm_gem_object_put_unlocked(state->bo[i]);
kfree(state);
}
@@ -188,12 +188,12 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
- drm_gem_object_reference(&exec[i]->bo[j]->base);
+ drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
- drm_gem_object_reference(&bo->base.base);
+ drm_gem_object_get(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
}
@@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
/* See comment on bo_index for why we have to check
* this.
*/
- DRM_ERROR("Rendering requires BOs to validate\n");
+ DRM_DEBUG("Rendering requires BOs to validate\n");
return -EINVAL;
}
@@ -690,13 +690,13 @@ vc4_cl_lookup_bos(struct drm_device *dev,
struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
handles[i]);
if (!bo) {
- DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
spin_unlock(&file_priv->table_lock);
goto fail;
}
- drm_gem_object_reference(bo);
+ drm_gem_object_get(bo);
exec->bo[i] = (struct drm_gem_cma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
@@ -728,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) {
- DRM_ERROR("overflow in exec arguments\n");
+ DRM_DEBUG("overflow in exec arguments\n");
ret = -EINVAL;
goto fail;
}
@@ -834,7 +834,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put_unlocked(&exec->bo[i]->base);
kvfree(exec->bo);
}
@@ -842,7 +842,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
}
/* Free up the allocation of any bin slots we used. */
@@ -973,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
}
bo = to_vc4_bo(gem_obj);
@@ -981,7 +981,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
&args->timeout_ns);
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return ret;
}
@@ -1007,8 +1007,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
- if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
- DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
+ if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
+ VC4_SUBMIT_CL_FIXED_RCL_ORDER |
+ VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
+ VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
+ DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
return -EINVAL;
}
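On the submit side the three new bits compose as the check above allows; for instance, a client wanting a fully deterministic tile walk (both axes increasing) would set the following, where submit is the client's struct drm_vc4_submit_cl:

	/* request a fixed render order: X and Y both increasing */
	submit.flags |= VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y;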
@@ -1117,6 +1120,4 @@ vc4_gem_destroy(struct drm_device *dev)
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
-
- vc4_bo_cache_destroy(dev);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ff09b8e2f9ee..937da8dd65b8 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -288,6 +288,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
+ kfree(edid);
return ret;
}
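The added kfree() plugs a leak: drm_get_edid() returns a kmalloc'ed EDID blob, and neither the property update nor drm_add_edid_modes() takes ownership of it. The resulting canonical get_modes shape, sketched (ddc_adapter stands in for the driver's i2c adapter):

	struct edid *edid = drm_get_edid(connector, ddc_adapter);
	int ret;

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);	/* caller owns the blob */

	return ret;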
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index aeec6e8703d2..50c4959b5bd3 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
static void vc4_output_poll_changed(struct drm_device *dev)
@@ -169,7 +170,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
gem_obj = drm_gem_object_lookup(file_priv,
mode_cmd->handles[0]);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n",
+ DRM_DEBUG("Failed to look up GEM BO %d\n",
mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
@@ -184,12 +185,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
}
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
mode_cmd = &mode_cmd_local;
}
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 4a8051532f00..273984f71ae2 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -261,8 +261,17 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
uint8_t max_y_tile = args->max_y_tile;
uint8_t xtiles = max_x_tile - min_x_tile + 1;
uint8_t ytiles = max_y_tile - min_y_tile + 1;
- uint8_t x, y;
+ uint8_t xi, yi;
uint32_t size, loop_body_size;
+ bool positive_x = true;
+ bool positive_y = true;
+
+ if (args->flags & VC4_SUBMIT_CL_FIXED_RCL_ORDER) {
+ if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X))
+ positive_x = false;
+ if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y))
+ positive_y = false;
+ }
size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
@@ -354,10 +363,12 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
rcl_u16(setup, args->height);
rcl_u16(setup, args->color_write.bits);
- for (y = min_y_tile; y <= max_y_tile; y++) {
- for (x = min_x_tile; x <= max_x_tile; x++) {
- bool first = (x == min_x_tile && y == min_y_tile);
- bool last = (x == max_x_tile && y == max_y_tile);
+ for (yi = 0; yi < ytiles; yi++) {
+ int y = positive_y ? min_y_tile + yi : max_y_tile - yi;
+ for (xi = 0; xi < xtiles; xi++) {
+ int x = positive_x ? min_x_tile + xi : max_x_tile - xi;
+ bool first = (xi == 0 && yi == 0);
+ bool last = (xi == xtiles - 1 && yi == ytiles - 1);
emit_tile(exec, setup, x, y, first, last);
}
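The rewrite decouples the iteration counters from the walk direction: xi and yi always count up, while x and y move whichever way the flags selected, and first/last now key off the counters so they stay correct in all four orders. For example, with min_x_tile = 0, max_x_tile = 3 and positive_x false:

	/* xi: 0, 1, 2, 3  ->  x: 3, 2, 1, 0 */
	int x = positive_x ? min_x_tile + xi : max_x_tile - xi;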
@@ -378,14 +389,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
if (surf->offset > obj->base.size) {
- DRM_ERROR("surface offset %d > BO size %zd\n",
+ DRM_DEBUG("surface offset %d > BO size %zd\n",
surf->offset, obj->base.size);
return -EINVAL;
}
if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
render_tiles_stride * args->max_y_tile + args->max_x_tile) {
- DRM_ERROR("MSAA tile %d, %d out of bounds "
+ DRM_DEBUG("MSAA tile %d, %d out of bounds "
"(bo size %zd, offset %d).\n",
args->max_x_tile, args->max_y_tile,
obj->base.size,
@@ -401,7 +412,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
- DRM_ERROR("MSAA surface had nonzero flags/bits\n");
+ DRM_DEBUG("MSAA surface had nonzero flags/bits\n");
return -EINVAL;
}
@@ -415,7 +426,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
if (surf->offset & 0xf) {
- DRM_ERROR("MSAA write must be 16b aligned.\n");
+ DRM_DEBUG("MSAA write must be 16b aligned.\n");
return -EINVAL;
}
@@ -437,7 +448,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
int ret;
if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
- DRM_ERROR("Extra flags set\n");
+ DRM_DEBUG("Extra flags set\n");
return -EINVAL;
}
@@ -453,12 +464,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
if (surf == &exec->args->zs_write) {
- DRM_ERROR("general zs write may not be a full-res.\n");
+ DRM_DEBUG("general zs write may not be a full-res.\n");
return -EINVAL;
}
if (surf->bits != 0) {
- DRM_ERROR("load/store general bits set with "
+ DRM_DEBUG("load/store general bits set with "
"full res load/store.\n");
return -EINVAL;
}
@@ -473,19 +484,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
- DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
+ DRM_DEBUG("Unknown bits in load/store: 0x%04x\n",
surf->bits);
return -EINVAL;
}
if (tiling > VC4_TILING_FORMAT_LT) {
- DRM_ERROR("Bad tiling format\n");
+ DRM_DEBUG("Bad tiling format\n");
return -EINVAL;
}
if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
if (format != 0) {
- DRM_ERROR("No color format should be set for ZS\n");
+ DRM_DEBUG("No color format should be set for ZS\n");
return -EINVAL;
}
cpp = 4;
@@ -499,16 +510,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
cpp = 4;
break;
default:
- DRM_ERROR("Bad tile buffer format\n");
+ DRM_DEBUG("Bad tile buffer format\n");
return -EINVAL;
}
} else {
- DRM_ERROR("Bad load/store buffer %d.\n", buffer);
+ DRM_DEBUG("Bad load/store buffer %d.\n", buffer);
return -EINVAL;
}
if (surf->offset & 0xf) {
- DRM_ERROR("load/store buffer must be 16b aligned.\n");
+ DRM_DEBUG("load/store buffer must be 16b aligned.\n");
return -EINVAL;
}
@@ -533,7 +544,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
int cpp;
if (surf->flags != 0) {
- DRM_ERROR("No flags supported on render config.\n");
+ DRM_DEBUG("No flags supported on render config.\n");
return -EINVAL;
}
@@ -541,7 +552,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
VC4_RENDER_CONFIG_FORMAT_MASK |
VC4_RENDER_CONFIG_MS_MODE_4X |
VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
- DRM_ERROR("Unknown bits in render config: 0x%04x\n",
+ DRM_DEBUG("Unknown bits in render config: 0x%04x\n",
surf->bits);
return -EINVAL;
}
@@ -556,7 +567,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
if (tiling > VC4_TILING_FORMAT_LT) {
- DRM_ERROR("Bad tiling format\n");
+ DRM_DEBUG("Bad tiling format\n");
return -EINVAL;
}
@@ -569,7 +580,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
cpp = 4;
break;
default:
- DRM_ERROR("Bad tile buffer format\n");
+ DRM_DEBUG("Bad tile buffer format\n");
return -EINVAL;
}
@@ -590,7 +601,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (args->min_x_tile > args->max_x_tile ||
args->min_y_tile > args->max_y_tile) {
- DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
+ DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
args->min_x_tile, args->min_y_tile,
args->max_x_tile, args->max_y_tile);
return -EINVAL;
@@ -599,7 +610,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (has_bin &&
(args->max_x_tile > exec->bin_tiles_x ||
args->max_y_tile > exec->bin_tiles_y)) {
- DRM_ERROR("Render tiles (%d,%d) outside of bin config "
+ DRM_DEBUG("Render tiles (%d,%d) outside of bin config "
"(%d,%d)\n",
args->max_x_tile, args->max_y_tile,
exec->bin_tiles_x, exec->bin_tiles_y);
@@ -642,7 +653,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
*/
if (!setup.color_write && !setup.zs_write &&
!setup.msaa_color_write && !setup.msaa_zs_write) {
- DRM_ERROR("RCL requires color or Z/S write\n");
+ DRM_DEBUG("RCL requires color or Z/S write\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 814b512c6b9a..2db485abb186 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
struct vc4_bo *bo;
if (hindex >= exec->bo_count) {
- DRM_ERROR("BO index %d greater than BO count %d\n",
+ DRM_DEBUG("BO index %d greater than BO count %d\n",
hindex, exec->bo_count);
return NULL;
}
@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
bo = to_vc4_bo(&obj->base);
if (bo->validated_shader) {
- DRM_ERROR("Trying to use shader BO as something other than "
+ DRM_DEBUG("Trying to use shader BO as something other than "
"a shader\n");
return NULL;
}
@@ -172,7 +172,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
* our math.
*/
if (width > 4096 || height > 4096) {
- DRM_ERROR("Surface dimensions (%d,%d) too large",
+ DRM_DEBUG("Surface dimensions (%d,%d) too large",
width, height);
return false;
}
@@ -191,7 +191,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
aligned_height = round_up(height, utile_h);
break;
default:
- DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
+ DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format);
return false;
}
@@ -200,7 +200,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
if (size + offset < size ||
size + offset > fbo->base.size) {
- DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
+ DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
width, height,
aligned_width, aligned_height,
size, offset, fbo->base.size);
@@ -214,7 +214,7 @@ static int
validate_flush(VALIDATE_ARGS)
{
if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
- DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
+ DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n");
return -EINVAL;
}
exec->found_flush = true;
@@ -226,13 +226,13 @@ static int
validate_start_tile_binning(VALIDATE_ARGS)
{
if (exec->found_start_tile_binning_packet) {
- DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
+ DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n");
return -EINVAL;
}
exec->found_start_tile_binning_packet = true;
if (!exec->found_tile_binning_mode_config_packet) {
- DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
return -EINVAL;
}
@@ -243,7 +243,7 @@ static int
validate_increment_semaphore(VALIDATE_ARGS)
{
if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
- DRM_ERROR("Bin CL must end with "
+ DRM_DEBUG("Bin CL must end with "
"VC4_PACKET_INCREMENT_SEMAPHORE\n");
return -EINVAL;
}
@@ -264,7 +264,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
/* Check overflow condition */
if (exec->shader_state_count == 0) {
- DRM_ERROR("shader state must precede primitives\n");
+ DRM_DEBUG("shader state must precede primitives\n");
return -EINVAL;
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
@@ -281,7 +281,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
- DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
+ DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
offset, length, index_size, ib->base.size);
return -EINVAL;
}
@@ -301,13 +301,13 @@ validate_gl_array_primitive(VALIDATE_ARGS)
/* Check overflow condition */
if (exec->shader_state_count == 0) {
- DRM_ERROR("shader state must precede primitives\n");
+ DRM_DEBUG("shader state must precede primitives\n");
return -EINVAL;
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
if (length + base_index < length) {
- DRM_ERROR("primitive vertex count overflow\n");
+ DRM_DEBUG("primitive vertex count overflow\n");
return -EINVAL;
}
max_index = length + base_index - 1;
@@ -324,7 +324,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
uint32_t i = exec->shader_state_count++;
if (i >= exec->shader_state_size) {
- DRM_ERROR("More requests for shader states than declared\n");
+ DRM_DEBUG("More requests for shader states than declared\n");
return -EINVAL;
}
@@ -332,7 +332,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
exec->shader_state[i].max_index = 0;
if (exec->shader_state[i].addr & ~0xf) {
- DRM_ERROR("high bits set in GL shader rec reference\n");
+ DRM_DEBUG("high bits set in GL shader rec reference\n");
return -EINVAL;
}
@@ -356,7 +356,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
int bin_slot;
if (exec->found_tile_binning_mode_config_packet) {
- DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
return -EINVAL;
}
exec->found_tile_binning_mode_config_packet = true;
@@ -368,14 +368,14 @@ validate_tile_binning_config(VALIDATE_ARGS)
if (exec->bin_tiles_x == 0 ||
exec->bin_tiles_y == 0) {
- DRM_ERROR("Tile binning config of %dx%d too small\n",
+ DRM_DEBUG("Tile binning config of %dx%d too small\n",
exec->bin_tiles_x, exec->bin_tiles_y);
return -EINVAL;
}
if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
- DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
+ DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags);
return -EINVAL;
}
@@ -493,20 +493,20 @@ vc4_validate_bin_cl(struct drm_device *dev,
const struct cmd_info *info;
if (cmd >= ARRAY_SIZE(cmd_info)) {
- DRM_ERROR("0x%08x: packet %d out of bounds\n",
+ DRM_DEBUG("0x%08x: packet %d out of bounds\n",
src_offset, cmd);
return -EINVAL;
}
info = &cmd_info[cmd];
if (!info->name) {
- DRM_ERROR("0x%08x: packet %d invalid\n",
+ DRM_DEBUG("0x%08x: packet %d invalid\n",
src_offset, cmd);
return -EINVAL;
}
if (src_offset + info->len > len) {
- DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
+ DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x "
"exceeds bounds (0x%08x)\n",
src_offset, cmd, info->name, info->len,
src_offset + len);
@@ -519,7 +519,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
if (info->func && info->func(exec,
dst_pkt + 1,
src_pkt + 1)) {
- DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
+ DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n",
src_offset, cmd, info->name);
return -EINVAL;
}
@@ -537,7 +537,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
exec->ct0ea = exec->ct0ca + dst_offset;
if (!exec->found_start_tile_binning_packet) {
- DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
+ DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
return -EINVAL;
}
@@ -549,7 +549,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
* semaphore increment.
*/
if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
- DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
+ DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
"VC4_PACKET_FLUSH\n");
return -EINVAL;
}
@@ -588,11 +588,11 @@ reloc_tex(struct vc4_exec_info *exec,
uint32_t remaining_size = tex->base.size - p0;
if (p0 > tex->base.size - 4) {
- DRM_ERROR("UBO offset greater than UBO size\n");
+ DRM_DEBUG("UBO offset greater than UBO size\n");
goto fail;
}
if (p1 > remaining_size - 4) {
- DRM_ERROR("UBO clamp would allow reads "
+ DRM_DEBUG("UBO clamp would allow reads "
"outside of UBO\n");
goto fail;
}
@@ -612,14 +612,14 @@ reloc_tex(struct vc4_exec_info *exec,
if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
if (cube_map_stride) {
- DRM_ERROR("Cube map stride set twice\n");
+ DRM_DEBUG("Cube map stride set twice\n");
goto fail;
}
cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
}
if (!cube_map_stride) {
- DRM_ERROR("Cube map stride not set\n");
+ DRM_DEBUG("Cube map stride not set\n");
goto fail;
}
}
@@ -660,7 +660,7 @@ reloc_tex(struct vc4_exec_info *exec,
case VC4_TEXTURE_TYPE_RGBA64:
case VC4_TEXTURE_TYPE_YUV422R:
default:
- DRM_ERROR("Texture format %d unsupported\n", type);
+ DRM_DEBUG("Texture format %d unsupported\n", type);
goto fail;
}
utile_w = utile_width(cpp);
@@ -713,7 +713,7 @@ reloc_tex(struct vc4_exec_info *exec,
level_size = aligned_width * cpp * aligned_height;
if (offset < level_size) {
- DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
+ DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db "
"overflowed buffer bounds (offset %d)\n",
i, level_width, level_height,
aligned_width, aligned_height,
@@ -764,7 +764,7 @@ validate_gl_shader_rec(struct drm_device *dev,
nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
if (nr_relocs * 4 > exec->shader_rec_size) {
- DRM_ERROR("overflowed shader recs reading %d handles "
+ DRM_DEBUG("overflowed shader recs reading %d handles "
"from %d bytes left\n",
nr_relocs, exec->shader_rec_size);
return -EINVAL;
@@ -774,7 +774,7 @@ validate_gl_shader_rec(struct drm_device *dev,
exec->shader_rec_size -= nr_relocs * 4;
if (packet_size > exec->shader_rec_size) {
- DRM_ERROR("overflowed shader recs copying %db packet "
+ DRM_DEBUG("overflowed shader recs copying %db packet "
"from %d bytes left\n",
packet_size, exec->shader_rec_size);
return -EINVAL;
@@ -794,7 +794,7 @@ validate_gl_shader_rec(struct drm_device *dev,
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
- DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
+ DRM_DEBUG("Shader handle %d too big\n", src_handles[i]);
return -EINVAL;
}
@@ -810,13 +810,13 @@ validate_gl_shader_rec(struct drm_device *dev,
if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
- DRM_ERROR("Thread mode of CL and FS do not match\n");
+ DRM_DEBUG("Thread mode of CL and FS do not match\n");
return -EINVAL;
}
if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
- DRM_ERROR("cs and vs cannot be threaded\n");
+ DRM_DEBUG("cs and vs cannot be threaded\n");
return -EINVAL;
}
@@ -831,7 +831,7 @@ validate_gl_shader_rec(struct drm_device *dev,
*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
if (src_offset != 0) {
- DRM_ERROR("Shaders must be at offset 0 of "
+ DRM_DEBUG("Shaders must be at offset 0 of "
"the BO.\n");
return -EINVAL;
}
@@ -842,7 +842,7 @@ validate_gl_shader_rec(struct drm_device *dev,
if (validated_shader->uniforms_src_size >
exec->uniforms_size) {
- DRM_ERROR("Uniforms src buffer overflow\n");
+ DRM_DEBUG("Uniforms src buffer overflow\n");
return -EINVAL;
}
@@ -900,7 +900,7 @@ validate_gl_shader_rec(struct drm_device *dev,
if (vbo->base.size < offset ||
vbo->base.size - offset < attr_size) {
- DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
+ DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n",
offset, attr_size, vbo->base.size);
return -EINVAL;
}
@@ -909,7 +909,7 @@ validate_gl_shader_rec(struct drm_device *dev,
max_index = ((vbo->base.size - offset - attr_size) /
stride);
if (state->max_index > max_index) {
- DRM_ERROR("primitives use index %d out of "
+ DRM_DEBUG("primitives use index %d out of "
"supplied %d\n",
state->max_index, max_index);
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 0b2df5c6efb4..d3f15bf60900 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
uint32_t clamp_reg, clamp_offset;
if (sig == QPU_SIG_SMALL_IMM) {
- DRM_ERROR("direct TMU read used small immediate\n");
+ DRM_DEBUG("direct TMU read used small immediate\n");
return false;
}
@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/
if (is_mul ||
QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
- DRM_ERROR("direct TMU load wasn't an add\n");
+ DRM_DEBUG("direct TMU load wasn't an add\n");
return false;
}
@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/
clamp_reg = raddr_add_a_to_live_reg_index(inst);
if (clamp_reg == ~0) {
- DRM_ERROR("direct TMU load wasn't clamped\n");
+ DRM_DEBUG("direct TMU load wasn't clamped\n");
return false;
}
clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
if (clamp_offset == ~0) {
- DRM_ERROR("direct TMU load wasn't clamped\n");
+ DRM_DEBUG("direct TMU load wasn't clamped\n");
return false;
}
@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
- DRM_ERROR("direct TMU load didn't add to a uniform\n");
+ DRM_DEBUG("direct TMU load didn't add to a uniform\n");
return false;
}
@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
} else {
if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
raddr_b == QPU_R_UNIF)) {
- DRM_ERROR("uniform read in the same instruction as "
+ DRM_DEBUG("uniform read in the same instruction as "
"texture setup.\n");
return false;
}
}
if (validation_state->tmu_write_count[tmu] >= 4) {
- DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+ DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
tmu);
return false;
}
@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/
if (!is_direct) {
if (validation_state->needs_uniform_address_update) {
- DRM_ERROR("Texturing with undefined uniform address\n");
+ DRM_DEBUG("Texturing with undefined uniform address\n");
return false;
}
@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
case QPU_SIG_LOAD_TMU1:
break;
default:
- DRM_ERROR("uniforms address change must be "
+ DRM_DEBUG("uniforms address change must be "
"normal math\n");
return false;
}
if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
- DRM_ERROR("Uniform address reset must be an ADD.\n");
+ DRM_DEBUG("Uniform address reset must be an ADD.\n");
return false;
}
if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
- DRM_ERROR("Uniform address reset must be unconditional.\n");
+ DRM_DEBUG("Uniform address reset must be unconditional.\n");
return false;
}
if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
!(inst & QPU_PM)) {
- DRM_ERROR("No packing allowed on uniforms reset\n");
+ DRM_DEBUG("No packing allowed on uniforms reset\n");
return false;
}
if (add_lri == -1) {
- DRM_ERROR("First argument of uniform address write must be "
+ DRM_DEBUG("First argument of uniform address write must be "
"an immediate value.\n");
return false;
}
if (validation_state->live_immediates[add_lri] != expected_offset) {
- DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+ DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
validation_state->live_immediates[add_lri],
expected_offset);
return false;
@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
- DRM_ERROR("Second argument of uniform address write must be "
+ DRM_DEBUG("Second argument of uniform address write must be "
"a uniform.\n");
return false;
}
@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS:
if (is_b) {
- DRM_ERROR("relative uniforms address change "
+ DRM_DEBUG("relative uniforms address change "
"unsupported\n");
return false;
}
@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
/* XXX: I haven't thought about these, so don't support them
* for now.
*/
- DRM_ERROR("Unsupported waddr %d\n", waddr);
+ DRM_DEBUG("Unsupported waddr %d\n", waddr);
return false;
case QPU_W_VPM_ADDR:
- DRM_ERROR("General VPM DMA unsupported\n");
+ DRM_DEBUG("General VPM DMA unsupported\n");
return false;
case QPU_W_VPM:
@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
bool ok;
if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
- DRM_ERROR("ADD and MUL both set up textures\n");
+ DRM_DEBUG("ADD and MUL both set up textures\n");
return false;
}
@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
* there's no need for it.
*/
if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
- DRM_ERROR("branch instruction at %d wrote a register.\n",
+ DRM_DEBUG("branch instruction at %d wrote a register.\n",
validation_state->ip);
return false;
}
@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
validated_shader->uniforms_size += 4;
if (validation_state->needs_uniform_address_update) {
- DRM_ERROR("Uniform read with undefined uniform "
+ DRM_DEBUG("Uniform read with undefined uniform "
"address\n");
return false;
}
@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
continue;
if (ip - last_branch < 4) {
- DRM_ERROR("Branch at %d during delay slots\n", ip);
+ DRM_DEBUG("Branch at %d during delay slots\n", ip);
return false;
}
last_branch = ip;
if (inst & QPU_BRANCH_REG) {
- DRM_ERROR("branching from register relative "
+ DRM_DEBUG("branching from register relative "
"not supported\n");
return false;
}
if (!(inst & QPU_BRANCH_REL)) {
- DRM_ERROR("relative branching required\n");
+ DRM_DEBUG("relative branching required\n");
return false;
}
@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
* end of the shader object.
*/
if (branch_imm % sizeof(inst) != 0) {
- DRM_ERROR("branch target not aligned\n");
+ DRM_DEBUG("branch target not aligned\n");
return false;
}
branch_target_ip = after_delay_ip + (branch_imm >> 3);
if (branch_target_ip >= validation_state->max_ip) {
- DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+ DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
ip, branch_target_ip,
validation_state->max_ip);
return false;
@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
* the shader.
*/
if (after_delay_ip >= validation_state->max_ip) {
- DRM_ERROR("Branch at %d continues past shader end "
+ DRM_DEBUG("Branch at %d continues past shader end "
"(%d/%d)\n",
ip, after_delay_ip, validation_state->max_ip);
return false;
@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
if (max_branch_target > validation_state->max_ip - 3) {
- DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+ DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
return false;
}
@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
return true;
if (texturing_in_progress(validation_state)) {
- DRM_ERROR("Branch target landed during TMU setup\n");
+ DRM_DEBUG("Branch target landed during TMU setup\n");
return false;
}
@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
- DRM_ERROR("Bad write at ip %d\n", ip);
+ DRM_DEBUG("Bad write at ip %d\n", ip);
goto fail;
}
@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
validated_shader->is_threaded = true;
if (ip < last_thread_switch_ip + 3) {
- DRM_ERROR("Thread switch too soon after "
+ DRM_DEBUG("Thread switch too soon after "
"last switch at ip %d\n", ip);
goto fail;
}
@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_IMM:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
- DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
+ DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
goto fail;
}
break;
@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail;
if (ip < last_thread_switch_ip + 3) {
- DRM_ERROR("Branch in thread switch at ip %d",
+ DRM_DEBUG("Branch in thread switch at ip %d",
ip);
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported QPU signal %d at "
+ DRM_DEBUG("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip);
goto fail;
}
@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
}
if (ip == validation_state.max_ip) {
- DRM_ERROR("shader failed to terminate before "
+ DRM_DEBUG("shader failed to terminate before "
"shader BO end at %zd\n",
shader_obj->base.size);
goto fail;
@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
/* Might corrupt other thread */
if (validated_shader->is_threaded &&
validation_state.all_registers_used) {
- DRM_ERROR("Shader uses threading, but uses the upper "
+ DRM_DEBUG("Shader uses threading, but uses the upper "
"half of the registers, too\n");
goto fail;
}
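The vc4 hunks above demote shader-validation failures from DRM_ERROR to DRM_DEBUG, since unprivileged userspace can trigger them at will and should not be able to flood dmesg. A minimal sketch of the resulting pattern (the check is hypothetical; DRM_DEBUG is the real macro, which to my understanding is gated on the drm.debug module parameter's core bit):

/* Illustrative only, not part of this patch. */
#include <drm/drmP.h>

static bool example_check(uint64_t inst)
{
	if (inst == 0) {
		/* Emitted only when drm.debug enables DRM_UT_CORE (0x1). */
		DRM_DEBUG("rejecting shader: null instruction\n");
		return false;	/* caller turns this into -EINVAL */
	}
	return true;
}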
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 12289673f457..2524ff116f00 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -190,7 +190,7 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_put_unlocked(&obj->base);
if (ret)
goto err;
@@ -245,7 +245,7 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 3109c8308eb5..8fd52f211e9d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -213,7 +213,7 @@ err_fence:
dma_fence_put(fence);
}
err:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 63d35c7e416c..49a3d8d5a249 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -122,7 +122,6 @@ static struct drm_driver driver = {
.dumb_create = virtio_gpu_mode_dumb_create,
.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
- .dumb_destroy = virtio_gpu_mode_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3a66abb8fd50..da2fb585fea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -236,9 +236,6 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 046e28b69d99..15d18fd0c64b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -308,7 +308,7 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
return 0;
}
-static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
.fb_probe = virtio_gpufb_create,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index cc025d8fbe19..72ad7b103448 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -118,13 +118,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index e695d74eaa9f..cd389c5eaef5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -192,7 +192,7 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
}
};
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
.busy_placement = &sys_ne_placement_flags
};
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..c706ad30411b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -51,6 +51,7 @@ struct vmw_cmdbuf_context {
struct list_head hw_submitted;
struct list_head preempted;
unsigned num_hw_submitted;
+ bool block_submission;
};
/**
@@ -60,6 +61,9 @@ struct vmw_cmdbuf_context {
* kernel command submissions, @cur.
* @space_mutex: Mutex to protect against starvation when we allocate
* main pool buffer space.
+ * @error_mutex: Mutex to serialize the work queue error handling.
+ * Note this is not needed if the same workqueue handler
+ * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
* Immutable.
* @dev_priv: Pointer to the device private struct. Immutable.
@@ -85,7 +89,6 @@ struct vmw_cmdbuf_context {
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
- * @tasklet: Tasklet struct for irq processing. Immutable.
* @alloc_queue: Wait queue for processes waiting to allocate command buffer
* space.
* @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -102,6 +105,7 @@ struct vmw_cmdbuf_context {
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
struct mutex space_mutex;
+ struct mutex error_mutex;
struct work_struct work;
struct vmw_private *dev_priv;
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
@@ -117,7 +121,6 @@ struct vmw_cmdbuf_man {
spinlock_t lock;
struct dma_pool *headers;
struct dma_pool *dheaders;
- struct tasklet_struct tasklet;
wait_queue_head_t alloc_queue;
wait_queue_head_t idle_queue;
bool irq_on;
@@ -181,12 +184,13 @@ struct vmw_cmdbuf_alloc_info {
};
/* Loop over each context in the command buffer manager. */
-#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
++(_i), ++(_ctx))
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
-
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+ bool enable);
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
/**
* vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
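For reference, a minimal hypothetical user of the for_each_cmdbuf_ctx() iterator introduced above (assumes the ctx fields shown in this patch):

/* Illustrative only: count contexts that still have queued buffers. */
static unsigned int example_busy_contexts(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_context *ctx;
	unsigned int i, busy = 0;

	for_each_cmdbuf_ctx(man, i, ctx)
		if (!list_empty(&ctx->submitted))
			busy++;

	return busy;
}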
@@ -278,9 +282,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
vmw_cmdbuf_header_inline_free(header);
return;
}
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
__vmw_cmdbuf_header_free(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
}
@@ -331,7 +335,8 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx)
{
while (ctx->num_hw_submitted < man->max_hw_submitted &&
- !list_empty(&ctx->submitted)) {
+ !list_empty(&ctx->submitted) &&
+ !ctx->block_submission) {
struct vmw_cmdbuf_header *entry;
SVGACBStatus status;
@@ -386,12 +391,17 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
- case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
break;
case SVGA_CB_STATUS_PREEMPTED:
- list_add(&entry->list, &ctx->preempted);
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
+ list_add_tail(&entry->list, &ctx->preempted);
+ break;
+ case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ WARN_ONCE(true, "Command buffer header error.\n");
+ __vmw_cmdbuf_header_free(entry);
break;
default:
WARN_ONCE(true, "Undefined command buffer status.\n");
@@ -468,20 +478,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
}
/**
- * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
- * handler implemented as a tasklet.
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler implemented as a threaded irq task.
*
- * @data: Tasklet closure. A pointer to the command buffer manager cast to
- * an unsigned long.
+ * @man: Pointer to the command buffer manager.
*
- * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * The bottom half of the interrupt handler simply calls into the
* command buffer processor to free finished buffers and submit any
* queued buffers to hardware.
*/
-static void vmw_cmdbuf_man_tasklet(unsigned long data)
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
- struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
-
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
@@ -502,24 +509,112 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
- bool restart = false;
+ bool restart[SVGA_CB_CONTEXT_MAX];
+ bool send_fence = false;
+ struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+ int i;
+ struct vmw_cmdbuf_context *ctx;
- spin_lock_bh(&man->lock);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ INIT_LIST_HEAD(&restart_head[i]);
+ restart[i] = false;
+ }
+
+ mutex_lock(&man->error_mutex);
+ spin_lock(&man->lock);
list_for_each_entry_safe(entry, next, &man->error, list) {
- restart = true;
- DRM_ERROR("Command buffer error.\n");
+ SVGACBHeader *cb_hdr = entry->cb_header;
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
+ (entry->cmd + cb_hdr->errorOffset);
+ u32 error_cmd_size, new_start_offset;
+ const char *cmd_name;
+
+ list_del_init(&entry->list);
+ restart[entry->cb_context] = true;
+
+ if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+ DRM_ERROR("Unknown command causing device error.\n");
+ DRM_ERROR("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ __vmw_cmdbuf_header_free(entry);
+ send_fence = true;
+ continue;
+ }
- list_del(&entry->list);
- __vmw_cmdbuf_header_free(entry);
- wake_up_all(&man->idle_queue);
+ DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
+ DRM_ERROR("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ DRM_ERROR("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
+
+ new_start_offset = cb_hdr->errorOffset + error_cmd_size;
+
+ if (new_start_offset >= cb_hdr->length) {
+ __vmw_cmdbuf_header_free(entry);
+ send_fence = true;
+ continue;
+ }
+
+ if (man->using_mob)
+ cb_hdr->ptr.mob.mobOffset += new_start_offset;
+ else
+ cb_hdr->ptr.pa += (u64) new_start_offset;
+
+ entry->cmd += new_start_offset;
+ cb_hdr->length -= new_start_offset;
+ cb_hdr->errorOffset = 0;
+ cb_hdr->offset = 0;
+ list_add_tail(&entry->list, &restart_head[entry->cb_context]);
+ man->ctx[entry->cb_context].block_submission = true;
+ }
+ spin_unlock(&man->lock);
+
+ /* Preempt all contexts with errors */
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
+ DRM_ERROR("Failed preempting command buffer "
+ "context %u.\n", i);
+ }
+
+ spin_lock(&man->lock);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (!ctx->block_submission)
+ continue;
+
+ /* Move preempted command buffers to the preempted queue. */
+ vmw_cmdbuf_ctx_process(man, ctx, &dummy);
+
+ /*
+ * Add the preempted queue after the command buffer
+ * that caused an error.
+ */
+ list_splice_init(&ctx->preempted, restart_head[i].prev);
+
+ /*
+ * Finally add all command buffers first in the submitted
+ * queue, to rerun them.
+ */
+ list_splice_init(&restart_head[i], &ctx->submitted);
+
+ ctx->block_submission = false;
}
- spin_unlock_bh(&man->lock);
- if (restart && vmw_cmdbuf_startstop(man, true))
- DRM_ERROR("Failed restarting command buffer context 0.\n");
+ vmw_cmdbuf_man_process(man);
+ spin_unlock(&man->lock);
+
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
+ DRM_ERROR("Failed restarting command buffer "
+ "context %u.\n", i);
+ }
/* Send a new fence in case one was removed */
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ if (send_fence) {
+ vmw_fifo_send_fence(man->dev_priv, &dummy);
+ wake_up_all(&man->idle_queue);
+ }
+
+ mutex_unlock(&man->error_mutex);
}
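The reworked error handler above recovers by skipping the command that the device flagged and resubmitting the remainder of the buffer. A hedged sketch of that arithmetic (hypothetical helper; field names as in SVGACBHeader, and it omits the matching entry->cmd and ptr adjustments done in the real function):

/* Illustrative only, mirrors the restart logic in vmw_cmdbuf_work_func(). */
static bool example_skip_errored_cmd(SVGACBHeader *cb_hdr, u32 error_cmd_size)
{
	u32 new_start = cb_hdr->errorOffset + error_cmd_size;

	if (new_start >= cb_hdr->length)
		return false;	/* error was in the last command; free the buffer */

	cb_hdr->length -= new_start;	/* rerun only what follows the bad command */
	cb_hdr->errorOffset = 0;
	cb_hdr->offset = 0;
	return true;
}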
/**
@@ -536,7 +631,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
bool idle = false;
int i;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
for_each_cmdbuf_ctx(man, i, ctx) {
if (!list_empty(&ctx->submitted) ||
@@ -548,7 +643,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
idle = list_empty(&man->error);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return idle;
}
@@ -571,7 +666,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
if (!cur)
return;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
if (man->cur_pos == 0) {
__vmw_cmdbuf_header_free(cur);
goto out_unlock;
@@ -580,7 +675,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
man->cur->cb_header->length = man->cur_pos;
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
man->cur = NULL;
man->cur_pos = 0;
}
@@ -673,14 +768,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
return true;
memset(info->node, 0, sizeof(*info->node));
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
if (ret) {
vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
}
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
info->done = !ret;
return info->done;
@@ -779,8 +874,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
if (ret)
return ret;
- header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
- &header->handle);
+ header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+ &header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
@@ -790,7 +885,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
cb_hdr = header->cb_header;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
- memset(cb_hdr, 0, sizeof(*cb_hdr));
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -802,9 +896,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
return 0;
out_no_cb_header:
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
drm_mm_remove_node(&header->node);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return ret;
}
@@ -827,8 +921,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
return -ENOMEM;
- dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
- &header->handle);
+ dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+ &header->handle);
if (!dheader)
return -ENOMEM;
@@ -837,7 +931,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
cb_hdr = &dheader->cb_header;
header->cb_header = cb_hdr;
header->cmd = dheader->cmd;
- memset(dheader, 0, sizeof(*dheader));
cb_hdr->status = SVGA_CB_STATUS_NONE;
cb_hdr->flags = SVGA_CB_FLAG_NONE;
cb_hdr->ptr.pa = (u64)header->handle +
@@ -1025,18 +1118,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
vmw_cmdbuf_cur_unlock(man);
}
-/**
- * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
- *
- * @man: The command buffer manager.
- */
-void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
-{
- if (!man)
- return;
-
- tasklet_schedule(&man->tasklet);
-}
/**
* vmw_cmdbuf_send_device_command - Send a command through the device context.
@@ -1061,9 +1142,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
memcpy(cmd, command, size);
header->cb_header->length = size;
header->cb_context = SVGA_CB_CONTEXT_DEVICE;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
status = vmw_cmdbuf_header_submit(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
vmw_cmdbuf_header_free(header);
if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1076,6 +1157,29 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
}
/**
+ * vmw_cmdbuf_preempt - Send a preempt command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @context: Device context to preempt.
+ *
+ * Synchronously sends a preempt command.
+ */
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
+{
+ struct {
+ uint32 id;
+ SVGADCCmdPreempt body;
+ } __packed cmd;
+
+ cmd.id = SVGA_DC_CMD_PREEMPT;
+ cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+ cmd.body.ignoreIDZero = 0;
+
+ return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+
+/**
* vmw_cmdbuf_startstop - Send a start / stop command through the device
* context.
*
@@ -1084,7 +1188,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
*
* Synchronously sends a device start / stop context command.
*/
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
bool enable)
{
struct {
@@ -1094,7 +1198,7 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
cmd.body.enable = (enable) ? 1 : 0;
- cmd.body.context = SVGA_CB_CONTEXT_0;
+ cmd.body.context = SVGA_CB_CONTEXT_0 + context;
return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
@@ -1193,7 +1297,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_man *man;
struct vmw_cmdbuf_context *ctx;
- int i;
+ unsigned int i;
int ret;
if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
@@ -1228,8 +1332,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
spin_lock_init(&man->lock);
mutex_init(&man->cur_mutex);
mutex_init(&man->space_mutex);
- tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
- (unsigned long) man);
+ mutex_init(&man->error_mutex);
man->default_size = VMW_CMDBUF_INLINE_SIZE;
init_waitqueue_head(&man->alloc_queue);
init_waitqueue_head(&man->idle_queue);
@@ -1238,11 +1341,14 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
- ret = vmw_cmdbuf_startstop(man, true);
- if (ret) {
- DRM_ERROR("Failed starting command buffer context 0.\n");
- vmw_cmdbuf_man_destroy(man);
- return ERR_PTR(ret);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ ret = vmw_cmdbuf_startstop(man, i, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer "
+ "context %u.\n", i);
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
+ }
}
return man;
@@ -1292,18 +1398,24 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
+ struct vmw_cmdbuf_context *ctx;
+ unsigned int i;
+
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- if (vmw_cmdbuf_startstop(man, false))
- DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ if (vmw_cmdbuf_startstop(man, i, false))
+ DRM_ERROR("Failed stopping command buffer "
+ "context %u.\n", i);
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
- tasklet_kill(&man->tasklet);
(void) cancel_work_sync(&man->work);
dma_pool_destroy(man->dheaders);
dma_pool_destroy(man->headers);
mutex_destroy(&man->cur_mutex);
mutex_destroy(&man->space_mutex);
+ mutex_destroy(&man->error_mutex);
kfree(man);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
int ret;
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
- if (unlikely(cres == NULL))
+ if (unlikely(!cres))
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
+ if (!man)
return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(uctx->cotables[i] == NULL)) {
- ret = -ENOMEM;
+ if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
}
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
+ if (unlikely(!ctx)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
return ERR_PTR(ret);
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
- if (unlikely(vcotbl == NULL)) {
+ if (unlikely(!vcotbl)) {
ret = -ENOMEM;
goto out_no_alloc;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 204bf181b69e..e84fee3ec4f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -36,7 +36,6 @@
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
-#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
@@ -227,7 +226,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
};
-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
{0, 0, 0}
};
@@ -630,7 +629,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
char host_log[100] = {0};
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(dev_priv == NULL)) {
+ if (unlikely(!dev_priv)) {
DRM_ERROR("Failed allocating a device private struct.\n");
return -ENOMEM;
}
@@ -825,7 +824,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = vmw_irq_install(dev, dev->pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -937,7 +936,7 @@ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
@@ -990,7 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -1035,7 +1034,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
int ret = -ENOMEM;
vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
- if (unlikely(vmw_fp == NULL))
+ if (unlikely(!vmw_fp))
return ret;
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1195,7 @@ static int vmw_master_create(struct drm_device *dev,
struct vmw_master *vmaster;
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
- if (unlikely(vmaster == NULL))
+ if (unlikely(!vmaster))
return -ENOMEM;
vmw_master_init(vmaster);
@@ -1516,10 +1515,6 @@ static struct drm_driver driver = {
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
- .irq_preinstall = vmw_irq_preinstall,
- .irq_postinstall = vmw_irq_postinstall,
- .irq_uninstall = vmw_irq_uninstall,
- .irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4b948fba9eec..7e5f30e234b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,12 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
+#include <linux/sync_file.h>
-#define VMWGFX_DRIVER_DATE "20170607"
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20170612"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 13
+#define VMWGFX_DRIVER_MINOR 14
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -351,6 +353,12 @@ struct vmw_otable_batch {
struct ttm_buffer_object *otable_bo;
};
+enum {
+ VMW_IRQTHREAD_FENCE,
+ VMW_IRQTHREAD_CMDBUF,
+ VMW_IRQTHREAD_MAX
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -529,6 +537,7 @@ struct vmw_private {
struct vmw_otable_batch otable_batch;
struct vmw_cmdbuf_man *cman;
+ DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- unsigned long irq_flags;
u32 val;
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
return val;
}
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user
*user_fence_rep,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
- uint32_t fence_handle);
+ uint32_t fence_handle,
+ int32_t out_fence_fd,
+ struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
-
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
 * IRQs and waiting - vmwgfx_irq.c
*/
-extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
-extern void vmw_irq_preinstall(struct drm_device *dev);
-extern int vmw_irq_postinstall(struct drm_device *dev);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno);
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header,
bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..21c62a34e558 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include <linux/sync_file.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
@@ -112,11 +113,12 @@ struct vmw_cmd_entry {
bool user_allow;
bool gb_disable;
bool gb_enable;
+ const char *cmd_name;
};
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
- (_gb_disable), (_gb_enable)}
+ (_gb_disable), (_gb_enable), #_cmd}
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -264,7 +266,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(node == NULL)) {
+ if (unlikely(!node)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
@@ -452,7 +454,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
- if (unlikely(rel == NULL)) {
+ if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
@@ -519,7 +521,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+ return -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2586,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
/**
* vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -3302,6 +3304,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
+ true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
@@ -3469,6 +3473,51 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
};
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
+{
+ u32 cmd_id = ((u32 *) buf)[0];
+
+ if (cmd_id >= SVGA_CMD_MAX) {
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+ const struct vmw_cmd_entry *entry;
+
+ *size = header->size + sizeof(SVGA3dCmdHeader);
+ cmd_id = header->id;
+ if (cmd_id >= SVGA_3D_CMD_MAX)
+ return false;
+
+ cmd_id -= SVGA_3D_CMD_BASE;
+ entry = &vmw_cmd_entries[cmd_id];
+ *cmd = entry->cmd_name;
+ return true;
+ }
+
+ switch (cmd_id) {
+ case SVGA_CMD_UPDATE:
+ *cmd = "SVGA_CMD_UPDATE";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
+ break;
+ case SVGA_CMD_DEFINE_GMRFB:
+ *cmd = "SVGA_CMD_DEFINE_GMRFB";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
+ break;
+ case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+ *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+ *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ default:
+ *cmd = "UNKNOWN";
+ *size = 0;
+ return false;
+ }
+
+ return true;
+}
+
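A short hypothetical caller of vmw_cmd_describe(), mirroring how the command-buffer error handler earlier in this series names the offending command (the helper and its parameters are illustrative):

/* Illustrative only, not part of this patch. */
static void example_name_bad_cmd(const u8 *cmd, u32 error_offset)
{
	const char *cmd_name;
	u32 cmd_size;

	if (vmw_cmd_describe(cmd + error_offset, &cmd_size, &cmd_name))
		DRM_ERROR("Command \"%s\" caused a device error.\n", cmd_name);
	else
		DRM_ERROR("Unknown command caused a device error.\n");
}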
static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3781,6 +3830,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* which the information should be copied.
 * @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
+ * @out_fence_fd: Exported file descriptor for the fence, or -1 if not used.
+ * @sync_file: Only used to clean up in case of an error in this function.
*
* This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
@@ -3796,7 +3847,9 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence,
- uint32_t fence_handle)
+ uint32_t fence_handle,
+ int32_t out_fence_fd,
+ struct sync_file *sync_file)
{
struct drm_vmw_fence_rep fence_rep;
@@ -3806,6 +3859,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
memset(&fence_rep, 0, sizeof(fence_rep));
fence_rep.error = ret;
+ fence_rep.fd = out_fence_fd;
if (ret == 0) {
BUG_ON(fence == NULL);
@@ -3828,6 +3882,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* and unreference the handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+ if (sync_file)
+ fput(sync_file->file);
+
+ if (fence_rep.fd != -1) {
+ put_unused_fd(fence_rep.fd);
+ fence_rep.fd = -1;
+ }
+
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
@@ -4003,7 +4065,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ uint32_t flags)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
@@ -4013,20 +4076,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
+ int32_t out_fence_fd = -1;
+ struct sync_file *sync_file = NULL;
+
+ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ DRM_ERROR("Failed to get a fence file descriptor.\n");
+ return out_fence_fd;
+ }
+ }
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (ret)
- return ret;
+ goto out_free_fence_fd;
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
kernel_commands, command_size,
&header);
- if (IS_ERR(kernel_commands))
- return PTR_ERR(kernel_commands);
+ if (IS_ERR(kernel_commands)) {
+ ret = PTR_ERR(kernel_commands);
+ goto out_free_fence_fd;
+ }
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (ret) {
@@ -4162,8 +4238,32 @@ int vmw_execbuf_process(struct drm_file *file_priv,
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
vmw_clear_validations(sw_context);
+
+ /*
+ * If anything fails here, give up trying to export the fence
+ * and do a sync since the user mode will not be able to sync
+ * the fence itself. This ensures we are still functionally
+ * correct.
+ */
+ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+
+ sync_file = sync_file_create(&fence->base);
+ if (!sync_file) {
+ DRM_ERROR("Unable to create sync file for fence\n");
+ put_unused_fd(out_fence_fd);
+ out_fence_fd = -1;
+
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ } else {
+ /* Link the fence with the FD created earlier */
+ fd_install(out_fence_fd, sync_file->file);
+ }
+ }
+
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle);
+ user_fence_rep, fence, handle,
+ out_fence_fd, sync_file);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
@@ -4214,6 +4314,9 @@ out_unlock:
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
+out_free_fence_fd:
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
return ret;
}
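The export path above pairs get_unused_fd_flags() with fd_install() so the fd only becomes visible to userspace once the sync_file exists. A condensed hypothetical version of that pattern (assumes the sync_file and file includes this patch already adds):

/* Illustrative only: reserve an fd, wrap the fence, then publish it. */
static int example_export_fence_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);	/* fd was never installed */
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);	/* fd is now live for userspace */
	return fd;
}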
@@ -4366,6 +4469,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
static const size_t copy_offset[] = {
offsetof(struct drm_vmw_execbuf_arg, context_handle),
sizeof(struct drm_vmw_execbuf_arg)};
+ struct dma_fence *in_fence = NULL;
if (unlikely(size < copy_offset[0])) {
DRM_ERROR("Invalid command size, ioctl %d\n",
@@ -4401,15 +4505,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
arg.context_handle = (uint32_t) -1;
break;
case 2:
- if (arg.pad64 != 0) {
- DRM_ERROR("Unused IOCTL data not set to zero.\n");
- return -EINVAL;
- }
- break;
default:
break;
}
+
+ /* If a fence FD was imported from elsewhere, wait on it */
+ if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+ in_fence = sync_file_get_fence(arg.imported_fence_fd);
+
+ if (!in_fence) {
+ DRM_ERROR("Cannot get imported fence\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+ if (ret)
+ goto out;
+ }
+
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -4419,12 +4533,16 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
- NULL);
+ NULL,
+ arg.flags);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
- return ret;
+ goto out;
vmw_kms_cursor_post_execbuf(dev_priv);
- return 0;
+out:
+ if (in_fence)
+ dma_fence_put(in_fence);
+ return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..3bbad22b3748 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
- unsigned long irq_flags;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
list_del_init(&fence->head);
--fman->num_fence_objects;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
fence->destroy(fence);
}
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work)
INIT_LIST_HEAD(&list);
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
list_splice_init(&fman->cleanup_list, &list);
seqno_valid = fman->seqno_valid;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
if (!seqno_valid && fman->goal_irq_on) {
fman->goal_irq_on = false;
@@ -284,7 +283,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
- if (unlikely(fman == NULL))
+ if (unlikely(!fman))
return NULL;
fman->dev_priv = dev_priv;
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
lists_empty = list_empty(&fman->fence_list) &&
list_empty(&fman->cleanup_list);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
kfree(fman);
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
struct vmw_fence_obj *fence, u32 seqno,
void (*destroy) (struct vmw_fence_obj *fence))
{
- unsigned long irq_flags;
int ret = 0;
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
if (unlikely(fman->fifo_down)) {
ret = -EBUSY;
goto out_unlock;
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
++fman->num_fence_objects;
out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
return ret;
}
@@ -489,11 +486,9 @@ rerun:
void vmw_fences_update(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
__vmw_fences_update(fman);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -541,7 +536,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL))
+ if (unlikely(!fence))
return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +601,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
return ret;
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
- if (unlikely(ufence == NULL)) {
+ if (unlikely(!ufence)) {
ret = -ENOMEM;
goto out_no_object;
}
@@ -650,6 +645,51 @@ out_no_object:
/**
+ * vmw_wait_dma_fence - Wait for a dma fence
+ *
+ * @fman: pointer to a fence manager
+ * @fence: DMA fence to wait on
+ *
+ * This function handles the case when the fence is actually a fence
+ * array. If that's the case, it'll wait on each of the child fence
+ */
+int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *fence_array;
+ int ret = 0;
+ int i;
+
+ if (dma_fence_is_signaled(fence))
+ return 0;
+
+ if (!dma_fence_is_array(fence))
+ return dma_fence_wait(fence, true);
+
+ /* From i915: Note that if the fence-array was created in
+ * signal-on-any mode, we should *not* decompose it into its individual
+ * fences. However, we don't currently store which mode the fence-array
+ * is operating in. Fortunately, the only user of signal-on-any is
+ * private to amdgpu and we should not see any incoming fence-array
+ * from sync-file being in signal-on-any mode.
+ */
+
+ fence_array = to_dma_fence_array(fence);
+ for (i = 0; i < fence_array->num_fences; i++) {
+ struct dma_fence *child = fence_array->fences[i];
+
+ ret = dma_fence_wait(child, true);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+
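A minimal hypothetical caller of vmw_wait_dma_fence(), mirroring the fence-fd import done in vmw_execbuf_ioctl() later in this series:

/* Illustrative only: wait for a fence imported from a sync_file fd. */
static int example_wait_imported_fence(struct vmw_fence_manager *fman, int fd)
{
	struct dma_fence *in_fence = sync_file_get_fence(fd);
	int ret;

	if (!in_fence)
		return -EINVAL;

	ret = vmw_wait_dma_fence(fman, in_fence);
	dma_fence_put(in_fence);	/* drop the reference taken above */
	return ret;
}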
+/**
* vmw_fence_fifo_down - signal all unsignaled fence objects.
*/
@@ -663,14 +703,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
* restart when we've released the fman->lock.
*/
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
dma_fence_get(&fence->base);
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
@@ -686,18 +726,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
BUG_ON(!list_empty(&fence->head));
dma_fence_put(&fence->base);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
}
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->fifo_down = false;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
@@ -812,9 +850,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ttm_base_object_unref(&base);
@@ -841,8 +879,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
*
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context, and may be called
- * from irq context.
+ * This function is always called from atomic context.
*/
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
@@ -851,13 +888,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
struct drm_file *file_priv;
- unsigned long irq_flags;
+
if (unlikely(event == NULL))
return;
file_priv = event->file_priv;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
+ spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
struct timeval tv;
@@ -869,7 +906,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ spin_unlock_irq(&dev->event_lock);
}
/**
@@ -904,11 +941,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- unsigned long irq_flags;
bool run_update = false;
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->pending_actions[action->type]++;
if (dma_fence_is_signaled_locked(&fence->base)) {
@@ -927,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
run_update = vmw_fence_goal_check_locked(fence);
}
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
if (run_update) {
if (!fman->goal_irq_on) {
@@ -966,7 +1002,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_manager *fman = fman_from_fence(fence);
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL))
+ if (unlikely(!eaction))
return -ENOMEM;
eaction->event = event;
@@ -1002,7 +1038,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
+ if (unlikely(!event)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
goto out_no_space;
@@ -1114,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle);
+ handle, -1, NULL);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index d9d85aa6ed20..20224dba9d8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -28,6 +28,7 @@
#ifndef _VMWGFX_FENCE_H_
#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -102,6 +103,9 @@ extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
+extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+ struct dma_fence *fence);
+
extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..f2f9d88131f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
- if (unlikely(gman == NULL))
+ if (unlikely(!gman))
return -ENOMEM;
spin_lock_init(&gman->lock);
@@ -157,9 +157,9 @@ static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
}
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
- pr_info("%s: No debug info available for the GMR id manager\n", prefix);
+ drm_printf(printer, "No debug info available for the GMR id manager\n");
}
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c7e1723292c..b9239ba067c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,11 +30,56 @@
#define VMW_FENCE_WRAP (1 << 24)
-irqreturn_t vmw_irq_handler(int irq, void *arg)
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ */
+static irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+ dev_priv->irqthread_pending)) {
+ vmw_fences_update(dev_priv->fman);
+ wake_up_all(&dev_priv->fence_queue);
+ ret = IRQ_HANDLED;
+ }
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending)) {
+ vmw_cmdbuf_irqthread(dev_priv->cman);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_irq_handler - irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the quick part of irq processing.
+ * The function performs fast actions like clearing the device interrupt
+ * flags and also reasonably quick actions like waking processes waiting for
+ * FIFO space. Other IRQ actions are deferred to the IRQ thread.
+ */
+static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
+ irqreturn_t ret = IRQ_HANDLED;
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +90,21 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (!status)
return IRQ_NONE;
- if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) {
- vmw_fences_update(dev_priv->fman);
- wake_up_all(&dev_priv->fence_queue);
- }
-
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
- if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
- SVGA_IRQFLAG_ERROR))
- vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
+ if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+ SVGA_IRQFLAG_FENCE_GOAL)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
- return IRQ_HANDLED;
+ if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+ SVGA_IRQFLAG_ERROR)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -281,23 +327,15 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
return ret;
}
-void vmw_irq_preinstall(struct drm_device *dev)
+static void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
- if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return;
-
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
-int vmw_irq_postinstall(struct drm_device *dev)
-{
- return 0;
-}
-
void vmw_irq_uninstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -306,8 +344,41 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
+ if (!dev->irq_enabled)
+ return;
+
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+ dev->irq_enabled = false;
+ free_irq(dev->irq, dev);
+}
+
+/**
+ * vmw_irq_install - Install the irq handlers
+ *
+ * @dev: Pointer to the drm device.
+ * @irq: The irq number.
+ * Return: Zero if successful. Negative number otherwise.
+ */
+int vmw_irq_install(struct drm_device *dev, int irq)
+{
+ int ret;
+
+ if (dev->irq_enabled)
+ return -EBUSY;
+
+ vmw_irq_preinstall(dev);
+
+ ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
+ IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
+ if (ret < 0)
+ return ret;
+
+ dev->irq_enabled = true;
+ dev->irq = irq;
+
+ return ret;
}
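The conversion above replaces the old tasklet bottom half with a threaded interrupt: the hard handler does only fast acknowledgement and flags deferred work through an atomic pending bit, while request_threaded_irq() runs the slow part in process context. A minimal sketch of that handshake follows; my_dev, MY_PENDING_EVENT and the helper names are illustrative, not the driver's own.

/* Sketch of the hard-irq/threaded-irq handshake used above. */
#include <linux/interrupt.h>
#include <linux/bitops.h>

#define MY_PENDING_EVENT 0

struct my_dev {
	unsigned long pending[1];	/* bitmap shared with the thread */
};

static irqreturn_t my_hard_handler(int irq, void *arg)
{
	struct my_dev *dev = arg;

	/* quick part: the bit also avoids redundant thread wakeups */
	if (!test_and_set_bit(MY_PENDING_EVENT, dev->pending))
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

static irqreturn_t my_thread_fn(int irq, void *arg)
{
	struct my_dev *dev = arg;

	if (test_and_clear_bit(MY_PENDING_EVENT, dev->pending))
		;	/* slow, sleepable processing goes here */

	return IRQ_HANDLED;
}

/* registration, as vmw_irq_install() does above:
 * request_threaded_irq(irq, my_hard_handler, my_thread_fn,
 *			IRQF_SHARED, "my_dev", dev);
 */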
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 620180df1303..5d50e45ae274 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
+
+ if (plane->fb) {
+ hotspot_x += plane->fb->hot_x;
+ hotspot_y += plane->fb->hot_y;
+ }
+
du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf;
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + hotspot_x,
du->cursor_y + hotspot_y);
+
+ du->core_hotspot_x = hotspot_x - du->hotspot_x;
+ du->core_hotspot_y = hotspot_y - du->hotspot_y;
} else {
DRM_ERROR("Failed to update cursor image\n");
}
@@ -2485,7 +2494,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle);
+ handle, -1, NULL);
if (out_fence)
*out_fence = fence;
else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
if (dev_priv->has_dx) {
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
} else {
*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
- if (unlikely(mob == NULL))
+ if (unlikely(!mob))
return NULL;
mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
- if (reply == NULL) {
+ if (!reply) {
DRM_ERROR("Cannot allocate memory for reply\n");
return -ENOMEM;
}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
return -ENOMEM;
}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
msg_len = strlen(log) + strlen("log ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(user_bo == NULL)) {
+ if (unlikely(!user_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
}
backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(backup == NULL))
+ if (unlikely(!backup))
return -ENOMEM;
ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
+ if (unlikely(!ushader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
}
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
- if (unlikely(shader == NULL)) {
+ if (unlikely(!shader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(buf == NULL))
+ if (unlikely(!buf))
return -ENOMEM;
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index c4de4ad0543b..ca3afae2db1f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1642,8 +1642,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
* something arbitrarily large and we will reject any layout
* that doesn't fit prim_bb_mem later
*/
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 7ece0e9058c6..f9cde03030fd 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -44,9 +44,12 @@ struct host1x_subdev {
* @np: device node
*/
static int host1x_subdev_add(struct host1x_device *device,
+ struct host1x_driver *driver,
struct device_node *np)
{
struct host1x_subdev *subdev;
+ struct device_node *child;
+ int err;
subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
if (!subdev)
@@ -59,6 +62,19 @@ static int host1x_subdev_add(struct host1x_device *device,
list_add_tail(&subdev->list, &device->subdevs);
mutex_unlock(&device->subdevs_lock);
+ /* recursively add children */
+ for_each_child_of_node(np, child) {
+ if (of_match_node(driver->subdevs, child) &&
+ of_device_is_available(child)) {
+ err = host1x_subdev_add(device, driver, child);
+ if (err < 0) {
+ /* XXX cleanup? */
+ of_node_put(child);
+ return err;
+ }
+ }
+ }
+
return 0;
}
@@ -87,7 +103,7 @@ static int host1x_device_parse_dt(struct host1x_device *device,
for_each_child_of_node(device->dev.parent->of_node, np) {
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
- err = host1x_subdev_add(device, np);
+ err = host1x_subdev_add(device, driver, np);
if (err < 0) {
of_node_put(np);
return err;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2c58a390123a..7f22c5c37660 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -134,8 +134,8 @@ static int host1x_probe(struct platform_device *pdev)
syncpt_irq = platform_get_irq(pdev, 0);
if (syncpt_irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
- return -ENXIO;
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+ return syncpt_irq;
}
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
@@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev)
return -ENOMEM;
err = iommu_attach_device(host->domain, &pdev->dev);
- if (err)
+ if (err == -ENODEV) {
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+ goto skip_iommu;
+ } else if (err) {
goto fail_free_domain;
+ }
geometry = &host->domain->geometry;
@@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev)
host->iova_end = geometry->aperture_end;
}
+skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
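The -ENODEV special case above keeps host1x usable on systems where the device sits behind no IOMMU: the freshly allocated domain is freed, the pointer is cleared so later code falls back to physical addressing, and probe continues at skip_iommu. A condensed sketch of the fallback, assuming a host structure with a domain pointer:

host->domain = iommu_domain_alloc(&platform_bus_type);
if (!host->domain)
	return -ENOMEM;

err = iommu_attach_device(host->domain, &pdev->dev);
if (err == -ENODEV) {
	/* no IOMMU for this device: fall back to physical addresses */
	iommu_domain_free(host->domain);
	host->domain = NULL;
} else if (err) {
	iommu_domain_free(host->domain);
	return err;
}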
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index dacb8009a605..37ebb51703fa 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -33,10 +33,10 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
unsigned int id = syncpt->id;
struct host1x *host = syncpt->host;
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
schedule_work(&syncpt->intr.work);
}
@@ -50,9 +50,9 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
- for_each_set_bit(id, &reg, BITS_PER_LONG) {
+ for_each_set_bit(id, &reg, 32) {
struct host1x_syncpt *syncpt =
- host->syncpt + (i * BITS_PER_LONG + id);
+ host->syncpt + (i * 32 + id);
host1x_intr_syncpt_handle(syncpt);
}
}
@@ -117,17 +117,17 @@ static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
unsigned int id)
{
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));
}
static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
unsigned int id)
{
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
}
static int _host1x_free_syncpt_irq(struct host1x *host)
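The replacements above matter because BIT_MASK() and BIT_WORD() are defined against BITS_PER_LONG, which is 64 on 64-bit kernels, while these sync-point registers are banks of 32-bit words; for ids above 31 the old code computed the wrong word index and shifted the bit out of the register. Worked through for id = 40 (illustrative only):

/* On a 64-bit kernel, for id = 40:
 *   BIT_WORD(40) == 0 and BIT_MASK(40) == 1UL << 40: word 0, bit lost.
 *   With 32-bit register words: 40 / 32 == 1, BIT(40 % 32) == 1U << 8,
 *   i.e. word 1, bit 8, which is what the hardware expects.
 */
host1x_sync_writel(host, BIT(id % 32),
		   HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));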
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index c93f74fcce72..7b0270d60742 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -89,7 +89,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
host1x_syncpt_idle(sp))
return -EINVAL;
- host1x_sync_writel(host, BIT_MASK(sp->id),
+ host1x_sync_writel(host, BIT(sp->id % 32),
HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
wmb();
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index bee504406cfc..db509ab8874e 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -197,10 +197,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
}
phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
- if (!phys_addr) {
- err = -EINVAL;
- goto unpin;
- }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -225,10 +221,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
}
phys_addr = host1x_bo_pin(g->bo, &sgt);
- if (!phys_addr) {
- err = -EINVAL;
- goto unpin;
- }
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197..9017dcc14502 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_ORTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
#endif
#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf..c9ba4c6db74c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -824,6 +824,7 @@
#define USB_VENDOR_ID_ORTEK 0x05a4
#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec22..8783a064cdcf 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -5,6 +5,7 @@
*
* Ortek PKB-1700
* Ortek WKB-2000
+ * iHome IMAC-A210S
* Skycable wireless presenter
*
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
rdesc[55] = 0x92;
} else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
rdesc[53] = 0x65;
}
return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id ortek_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ }
};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7f..c008847e0b20 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
int res;
+ set_bit(HID_OPENED, &usbhid->iofl);
+
if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
return 0;
res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */
- if (res < 0)
+ if (res < 0) {
+ clear_bit(HID_OPENED, &usbhid->iofl);
return -EIO;
+ }
usbhid->intf->needs_remote_wakeup = 1;
set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
- set_bit(HID_OPENED, &usbhid->iofl);
set_bit(HID_IN_POLLING, &usbhid->iofl);
res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
-
/*
* Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing
* with hid_start_in().
*/
spin_lock_irq(&usbhid->lock);
- clear_bit(HID_IN_POLLING, &usbhid->iofl);
clear_bit(HID_OPENED, &usbhid->iofl);
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+ clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+ return;
+
hid_cancel_delayed_stuff(usbhid);
usb_kill_urb(usbhid->urbin);
usbhid->intf->needs_remote_wakeup = 0;
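The reorder above makes HID_OPENED visible before any early return so that usbhid_close() always has state to undo, and clears the bit again when usb_autopm_get_interface() fails. The underlying set-then-roll-back shape, sketched with hypothetical names:

static int my_open(struct my_dev *dev)
{
	int res;

	set_bit(MY_OPENED, &dev->flags);	/* publish the open first */

	res = my_wake_hardware(dev);		/* may fail */
	if (res < 0) {
		clear_bit(MY_OPENED, &dev->flags);	/* roll back */
		return res;
	}

	return 0;
}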
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1006b230b236..65fa29591d21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+ depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2ea6d0d25a01..143a8fd582b4 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+ /* Some broken DSDTs use 1MiHz instead of 1MHz */
+ if (acpi_speed == 1048576)
+ acpi_speed = 1000000;
/*
* Find bus speed from the "clock-frequency" device property, ACPI
* or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (dev->clk_freq != 100000 && dev->clk_freq != 400000
&& dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
dev_err(&pdev->dev,
- "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+ "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+ dev->clk_freq);
ret = -EINVAL;
goto exit_reset;
}
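1048576 Hz is exactly 2^20, i.e. 1 MiHz, a value some DSDTs emit where 1 MHz was intended; since the validation below the quirk only accepts the exact standard rates, the bogus value is rewritten before it is checked. The whole fix is two lines:

/* firmware quirk: 2^20 Hz reported where 10^6 Hz was meant */
if (acpi_speed == 1048576)
	acpi_speed = 1000000;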
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4842ec3a5451..a9126b3cda61 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client)
+{
+ if (!(client && matches))
+ return NULL;
+
+ return acpi_match_device(matches, &client->dev);
+}
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
{
struct i2c_adapter *adapter = i2c_verify_adapter(dev);
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
return ACPI_HANDLE(dev) == (acpi_handle)data;
}
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
{
return ACPI_COMPANION(dev) == data;
}
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
struct device *dev;
dev = bus_find_device(&i2c_bus_type, NULL, handle,
- i2c_acpi_match_adapter);
+ i2c_acpi_find_match_adapter);
return dev ? i2c_verify_adapter(dev) : NULL;
}
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+ dev = bus_find_device(&i2c_bus_type, NULL, adev,
+ i2c_acpi_find_match_device);
return dev ? i2c_verify_client(dev) : NULL;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index c89dac7fd2e7..12822a4b8f8f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
* Tree match table entry is supplied for the probing device.
*/
if (!driver->id_table &&
+ !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
!i2c_of_match_device(dev->driver->of_match_table, client))
return -ENODEV;
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 3b63f5e5b89c..3d3d9bf02101 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
int i2c_check_7bit_addr_validity_strict(unsigned short addr);
#ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client);
void i2c_acpi_register_devices(struct i2c_adapter *adap);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client)
+{
+ return NULL;
+}
#endif /* CONFIG_ACPI */
extern struct notifier_block i2c_acpi_notifier;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 2c64d0e0740f..17121329bb79 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
different sets of pins at run-time.
This driver can also be built as a module. If so, the module will be
- called pinctrl-i2cmux.
+ called i2c-mux-pinctrl.
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
struct regmap *regmap;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
- atomic_t active_intr;
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
struct mutex mutex;
u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
goto out_fix_power_state;
}
- if (state)
- atomic_inc(&data->active_intr);
- else
- atomic_dec(&data->active_intr);
-
return 0;
out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
mutex_lock(&data->mutex);
- if (atomic_read(&data->active_intr))
- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..e44f62bf9caa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.en_mask = 0x08,
},
},
+ .sim = {
+ .addr = 0x24,
+ .value = BIT(0),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = true,
.bootime = 2, /* guess */
},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(7),
+ },
.multi_read_bit = false,
.bootime = 2, /* guess */
},
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr_ihl = 0x22,
.mask_ihl = 0x80,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -22,6 +22,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
+#include <linux/iopoll.h>
#define ASPEED_RESOLUTION_BITS 10
#define ASPEED_CLOCKS_PER_SAMPLE 12
@@ -38,11 +39,17 @@
#define ASPEED_ENGINE_ENABLE BIT(0)
+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME 500
+#define ASPEED_ADC_INIT_TIMEOUT 500000
+
struct aspeed_adc_model_data {
const char *model_name;
unsigned int min_sampling_rate; // Hz
unsigned int max_sampling_rate; // Hz
unsigned int vref_voltage; // mV
+ bool wait_init_sequence;
};
struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
goto scaler_error;
}
+ model_data = of_device_get_match_data(&pdev->dev);
+
+ if (model_data->wait_init_sequence) {
+ /* Enable engine in normal mode. */
+ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+
+ /* Wait for initial sequence complete. */
+ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+ adc_engine_control_reg_val,
+ adc_engine_control_reg_val &
+ ASPEED_ADC_CTRL_INIT_RDY,
+ ASPEED_ADC_INIT_POLLING_TIME,
+ ASPEED_ADC_INIT_TIMEOUT);
+ if (ret)
+ goto scaler_error;
+ }
+
/* Start all channels in normal mode. */
ret = clk_prepare_enable(data->clk_scaler->clk);
if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
.vref_voltage = 1800, // mV
.min_sampling_rate = 1,
.max_sampling_rate = 1000000,
+ .wait_init_sequence = true,
};
static const struct of_device_id aspeed_adc_matches[] = {
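readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> re-reads addr into val until cond holds or timeout_us microseconds pass, returning 0 on success and -ETIMEDOUT otherwise, which is how the probe path above waits for the init-ready bit. A minimal sketch with hypothetical register names:

#include <linux/iopoll.h>

u32 reg;
int ret;

/* poll every 500 us, give up after 500 ms */
ret = readl_poll_timeout(base + MY_ENGINE_CONTROL, reg,
			 reg & MY_INIT_RDY, 500, 500000);
if (ret)
	return ret;	/* -ETIMEDOUT: init sequence never finished */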
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,6 +28,8 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
+#define AXP288_ADC_TS_PIN_GPADC 0xF2
+#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+ unsigned long address)
+{
+ int ret;
+
+ /* channels other than GPADC do not need to switch TS pin */
+ if (address != AXP288_GP_ADC_H)
+ return 0;
+
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+ if (ret)
+ return ret;
+
+ /* When switching to the GPADC pin give things some time to settle */
+ if (mode == AXP288_ADC_TS_PIN_GPADC)
+ usleep_range(6000, 10000);
+
+ return 0;
+}
+
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ chan->address)) {
+ dev_err(&indio_dev->dev, "GPADC mode\n");
+ ret = -EINVAL;
+ break;
+ }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ chan->address))
+ dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+ /* ADC should always be enabled for internal FG to function */
+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+ return -EIO;
+
+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all times, including system suspend;
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ ret = axp288_adc_set_state(axp20x->regmap);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
err:
pm_runtime_put_autosuspend(indio_dev->dev.parent);
+ disable_irq(irq);
mutex_unlock(&info->mutex);
return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->temp_data_irq);
return IRQ_HANDLED;
}
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->fifo_data_irq);
return IRQ_HANDLED;
}
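The change above balances the IRQ depth count: enable_irq() in the read path is now always paired with a disable_irq() on the way out, instead of a disable_irq_nosync() in the handler that never ran when the completion timed out. The resulting shape of the read path, sketched with a generic completion:

enable_irq(irq);	/* arm the data-ready interrupt */

if (!wait_for_completion_timeout(&info->completion,
				 msecs_to_jiffies(100)))
	ret = -ETIMEDOUT;	/* the handler may never have fired */

/* rebalance unconditionally, whether or not the irq arrived */
disable_irq(irq);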
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
-#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VALT 0x800
#define VF610_ADC_REFSEL_VBG 0x1000
#define VF610_ADC_ADTRG_HARD 0x2000
#define VF610_ADC_AVGS_8 0x4000
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..6e6a1ecc99dd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -550,6 +550,31 @@ out:
}
EXPORT_SYMBOL(st_sensors_read_info_raw);
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+ const struct st_sensor_settings *sensor_settings)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device_node *np = sdata->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+
+ pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+ if (((np && of_property_read_bool(np, "spi-3wire")) ||
+ (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+ int err;
+
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+ sensor_settings->sim.addr,
+ sensor_settings->sim.value);
+ if (err < 0) {
+ dev_err(&indio_dev->dev,
+ "failed to init interface mode\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
return -ENODEV;
}
+ err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+ if (err < 0)
+ return err;
+
if (sensor_settings[i].wai_addr) {
err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
sensor_settings[i].wai_addr, &wai);
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..f1bce05ffa13 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = true,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 01236cef7bfb..437522ca97b4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,6 +61,7 @@ struct addr_req {
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
+ struct delayed_work work;
int status;
u32 seq;
};
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
}
EXPORT_SYMBOL(rdma_translate_ip);
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
{
unsigned long delay;
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time)
if ((long)delay < 0)
delay = 0;
- mod_delayed_work(addr_wq, &work, delay);
+ mod_delayed_work(addr_wq, delayed_work, delay);
}
static void queue_req(struct addr_req *req)
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req)
list_add(&req->list, &temp_req->list);
- if (req_list.next == &req->list)
- set_timeout(req->timeout);
+ set_timeout(&req->work, req->timeout);
mutex_unlock(&lock);
}
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in,
return ret;
}
+static void process_one_req(struct work_struct *_work)
+{
+ struct addr_req *req;
+ struct sockaddr *src_in, *dst_in;
+
+ mutex_lock(&lock);
+ req = container_of(_work, struct addr_req, work.work);
+
+ if (req->status == -ENODATA) {
+ src_in = (struct sockaddr *)&req->src_addr;
+ dst_in = (struct sockaddr *)&req->dst_addr;
+ req->status = addr_resolve(src_in, dst_in, req->addr,
+ true, req->seq);
+ if (req->status && time_after_eq(jiffies, req->timeout)) {
+ req->status = -ETIMEDOUT;
+ } else if (req->status == -ENODATA) {
+ /* requeue the work to retry this request later */
+ set_timeout(&req->work, req->timeout);
+ mutex_unlock(&lock);
+ return;
+ }
+ }
+ list_del(&req->list);
+ mutex_unlock(&lock);
+
+ req->callback(req->status, (struct sockaddr *)&req->src_addr,
+ req->addr, req->context);
+ put_client(req->client);
+ kfree(req);
+}
+
static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work)
true, req->seq);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA)
+ else if (req->status == -ENODATA) {
+ set_timeout(&req->work, req->timeout);
continue;
+ }
}
list_move_tail(&req->list, &done_list);
}
- if (!list_empty(&req_list)) {
- req = list_entry(req_list.next, struct addr_req, list);
- set_timeout(req->timeout);
- }
mutex_unlock(&lock);
list_for_each_entry_safe(req, temp_req, &done_list, list) {
list_del(&req->list);
+ /* It is safe to cancel other work items from this work item
+ * because only one work item at a time can run on this
+ * single-threaded work queue.
+ */
+ cancel_delayed_work(&req->work);
req->callback(req->status, (struct sockaddr *) &req->src_addr,
req->addr, req->context);
put_client(req->client);
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
+ INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
req->status = -ECANCELED;
req->timeout = jiffies;
list_move(&req->list, &req_list);
- set_timeout(req->timeout);
+ set_timeout(&req->work, req->timeout);
break;
}
}
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
- if (neigh->nud_state & NUD_VALID) {
- set_timeout(jiffies);
- }
+ if (neigh->nud_state & NUD_VALID)
+ set_timeout(&work, jiffies);
}
return 0;
}
@@ -820,7 +854,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+ addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
if (!addr_wq)
return -ENOMEM;
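The heart of the rework above is one delayed_work per request, so a request that keeps returning -ENODATA can reschedule only itself instead of pinning the head of a shared list. A generic sketch of the per-item pattern; the my_* names are hypothetical:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_req {
	struct delayed_work work;	/* one work item per request */
	unsigned long timeout;		/* absolute deadline, in jiffies */
};

static void my_process_one(struct work_struct *_work)
{
	struct my_req *req = container_of(_work, struct my_req, work.work);

	if (my_try_resolve(req) == -EAGAIN &&
	    time_before(jiffies, req->timeout)) {
		/* requeue only this request; others are untouched */
		mod_delayed_work(my_wq, &req->work, HZ / 4);
		return;
	}

	my_complete(req);	/* done, timed out or failed */
}

/* at submit time, mirroring rdma_resolve_ip() above */
INIT_DELAYED_WORK(&req->work, my_process_one);
mod_delayed_work(my_wq, &req->work, 0);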
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 2c98533a0203..c551d2b275fd 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_resize_cq cmd;
- struct ib_uverbs_resize_cq_resp resp;
+ struct ib_uverbs_resize_cq_resp resp = {};
struct ib_udata udata;
struct ib_cq *cq;
int ret = -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3d2609608f58..c023e2c81b8f 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
+ kobject_put(&file->device->kobj);
kfree(file);
}
@@ -917,7 +918,6 @@ err:
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_uverbs_device *dev = file->device;
mutex_lock(&file->cleanup_mutex);
if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
ib_uverbs_release_async_event_file);
kref_put(&file->ref, ib_uverbs_release_file);
- kobject_put(&dev->kobj);
return 0;
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index fb98ed67d5bc..7f8fe443df46 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -895,7 +895,6 @@ static const struct {
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
- [IB_QPS_ERR] = { .valid = 1 },
[IB_QPS_INIT] = {
.valid = 1,
.req_param = {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 23fad6d96944..2540b65e242c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
continue;
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
- if (IS_ERR(free_mr->mr_free_qp[i])) {
+ if (!free_mr->mr_free_qp[i]) {
dev_err(dev, "Create loop qp failed!\n");
goto create_lp_qp_failed;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index ae0746754008..3d701c7a4c91 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type != IB_QPT_RC) {
av = *wqe;
- if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+ if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
else
*wqe += sizeof(struct mlx5_base_av);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ff50a7bd66d8..7ac25059c40f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
unsigned long flags;
struct rw_semaphore vlan_rwsem;
+ struct mutex mcast_mutex;
struct rb_root path_tree;
struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index f87d104837dc..d69410c2ed97 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
- p = cm_id->context;
ib_send_cm_drep(cm_id, NULL, 0);
/* Fall through */
case IB_CM_REJ_RECEIVED:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 7871379342f4..184a22f48027 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
IPOIB_NETDEV_STAT(tx_bytes),
IPOIB_NETDEV_STAT(tx_errors),
IPOIB_NETDEV_STAT(rx_dropped),
- IPOIB_NETDEV_STAT(tx_dropped)
+ IPOIB_NETDEV_STAT(tx_dropped),
+ IPOIB_NETDEV_STAT(multicast),
};
#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 57a9655e844d..2e075377242e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
skb->dev = dev;
if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
return pending;
}
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+ struct ib_qp *qp,
+ enum ib_qp_state new_state)
+{
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+ return;
+ }
+ /* print according to the new state and the previous state */
+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+ else
+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+ new_state, qp_attr.qp_state);
+}
+
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
*/
qp_attr.qp_state = IB_QPS_ERR;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
- ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
/* Wait for all sends and receives to complete */
begin = jiffies;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 4ce315c92b48..6c77df34869d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
int i, wait_flushed = 0;
init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
spin_lock_irqsave(&priv->lock, flags);
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
init_completion(&priv->ntbl.deleted);
- set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
/* Stop GC if called at init fail need to cancel work */
stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
+ .ndo_get_stats64 = ipoib_get_stats,
};
void ipoib_setup_common(struct net_device *dev)
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
priv->dev = dev;
spin_lock_init(&priv->lock);
init_rwsem(&priv->vlan_rwsem);
+ mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
- if (!result)
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
- else {
+ if (result) {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
hca->name, port);
goto device_init_failed;
}
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
hca->name, port, result);
goto device_init_failed;
- } else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ }
+
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+ sizeof(union ib_gid));
set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
result = ipoib_dev_init(priv->dev, hca, port);
- if (result < 0) {
+ if (result) {
printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
hca->name, port, result);
goto device_init_failed;
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void)
ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 057f58e6afca..93e149efc1f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- unsigned long flags;
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- spin_lock_irqsave(&priv->lock, flags);
- cancel_delayed_work(&priv->mcast_task);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- flush_workqueue(priv->wq);
+ cancel_delayed_work_sync(&priv->mcast_task);
return 0;
}
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
{
struct ipoib_mcast *mcast, *tmcast;
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
struct ipoib_mcast *mcast, *tmcast;
unsigned long flags;
+ mutex_lock(&priv->mcast_mutex);
ipoib_dbg_mcast(priv, "flushing multicast list\n");
spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
ipoib_mcast_remove_list(&remove_list);
+ mutex_unlock(&priv->mcast_mutex);
}
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
ipoib_mcast_remove_list(&remove_list);
/*
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 688e77576e5a..354cbd6392cd 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4452,6 +4452,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
/* Setting */
irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
irte->hi.fields.vector = vcpu_pi_info->vector;
+ irte->lo.fields_vapic.ga_log_intr = 1;
irte->lo.fields_vapic.guest_mode = 1;
irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5cc597b383c7..372303700566 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2440,11 +2440,11 @@ static int __init state_next(void)
break;
case IOMMU_ACPI_FINISHED:
early_enable_iommus();
- register_syscore_ops(&amd_iommu_syscore_ops);
x86_platform.iommu_shutdown = disable_iommus;
init_state = IOMMU_ENABLED;
break;
case IOMMU_ENABLED:
+ register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
enable_iommus_v2();
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index bc89b4d6c043..2d80fa8a0634 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -400,6 +400,8 @@ struct arm_smmu_device {
u32 cavium_id_base; /* Specific to Cavium */
+ spinlock_t global_sync_lock;
+
/* IOMMU core code handle */
struct iommu_device iommu;
};
@@ -436,7 +438,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
- spinlock_t cb_lock; /* Serialises ATS1* ops */
+ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
};
@@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
void __iomem *base = ARM_SMMU_GR0(smmu);
+ unsigned long flags;
+ spin_lock_irqsave(&smmu->global_sync_lock, flags);
__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
base + ARM_SMMU_GR0_sTLBGSTATUS);
+ spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
static void arm_smmu_tlb_sync_context(void *cookie)
@@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie)
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+ unsigned long flags;
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
base + ARM_SMMU_CB_TLBSTATUS);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1511,6 +1519,12 @@ static int arm_smmu_add_device(struct device *dev)
if (using_legacy_binding) {
ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+ /*
+ * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+ * will allocate/initialise a new one. Thus we need to update fwspec for
+ * later use.
+ */
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
@@ -1550,15 +1564,15 @@ static int arm_smmu_add_device(struct device *dev)
ret = arm_smmu_master_alloc_smes(dev);
if (ret)
- goto out_free;
+ goto out_cfg_free;
iommu_device_link(&smmu->iommu, dev);
return 0;
+out_cfg_free:
+ kfree(cfg);
out_free:
- if (fwspec)
- kfree(fwspec->iommu_priv);
iommu_fwspec_free(dev);
return ret;
}
@@ -1925,6 +1939,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->num_mapping_groups = size;
mutex_init(&smmu->stream_map_mutex);
+ spin_lock_init(&smmu->global_sync_lock);
if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index af330f513653..d665d0dc16e8 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
+ if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+ return -ERANGE;
+
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
/*
* Synchronise all PTE updates for the new mapping before there's
@@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
size_t unmapped;
+ if (WARN_ON(upper_32_bits(iova)))
+ return 0;
+
unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
if (unmapped)
io_pgtable_tlb_sync(&data->iop);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b182039862c5..e8018a308868 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+ paddr >= (1ULL << data->iop.cfg.oas)))
+ return -ERANGE;
+
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
/*
@@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+ return 0;
+
unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
if (unmapped)
io_pgtable_tlb_sync(&data->iop);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 524263a7ae6f..a3e667077b14 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
* @fmt: The page table format.
* @cookie: An opaque token provided by the IOMMU driver and passed back to
* any callback routines.
- * @tlb_sync_pending: Private flag for optimising out redundant syncs.
* @cfg: A copy of the page table configuration.
* @ops: The page table operations in use for this set of page tables.
*/
struct io_pgtable {
enum io_pgtable_fmt fmt;
void *cookie;
- bool tlb_sync_pending;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops ops;
};
@@ -175,22 +173,17 @@ struct io_pgtable {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
iop->cfg.tlb->tlb_flush_all(iop->cookie);
- iop->tlb_sync_pending = true;
}
static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
unsigned long iova, size_t size, size_t granule, bool leaf)
{
iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
- iop->tlb_sync_pending = true;
}
static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
- if (iop->tlb_sync_pending) {
- iop->cfg.tlb->tlb_sync(iop->cookie);
- iop->tlb_sync_pending = false;
- }
+ iop->cfg.tlb->tlb_sync(iop->cookie);
}
/**
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5d14cd15198d..91c6d367ab35 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+ data->tlb_flush_active = true;
}
static void mtk_iommu_tlb_sync(void *cookie)
@@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie)
int ret;
u32 tmp;
+ /* Avoid timing out if there's nothing to wait for */
+ if (!data->tlb_flush_active)
+ return;
+
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
tmp != 0, 10, 100000);
if (ret) {
@@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+ data->tlb_flush_active = false;
}
static const struct iommu_gather_ops mtk_iommu_gather_ops = {
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 2a28eadeea0e..c06cc91b5d9a 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,7 @@ struct mtk_iommu_data {
struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
bool enable_4GB;
+ bool tlb_flush_active;
struct iommu_device iommu;
};
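With the generic tlb_sync_pending flag gone from io-pgtable (two hunks up), a sync can now be issued with no flush outstanding, and on this hardware the completion register would then never assert; the new tlb_flush_active flag makes such a sync a no-op. The idiom, sketched with hypothetical register names:

static void my_tlb_flush_nosync(struct my_iommu_data *data)
{
	writel_relaxed(MY_INV_RANGE, data->base + MY_REG_INVALIDATE);
	data->tlb_flush_active = true;	/* a completion is now pending */
}

static void my_tlb_sync(struct my_iommu_data *data)
{
	u32 tmp;

	if (!data->tlb_flush_active)
		return;		/* nothing in flight: avoid a futile poll */

	if (readl_poll_timeout_atomic(data->base + MY_REG_CPE_DONE, tmp,
				      tmp != 0, 10, 100000))
		pr_warn("TLB sync timed out\n");

	writel_relaxed(0, data->base + MY_REG_CPE_DONE);	/* clear status */
	data->tlb_flush_active = false;
}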
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
char log_name[15]; /* log filename */
struct log_data *log_head, *log_tail; /* head and tail for queue */
int if_used; /* open count for interface */
- int volatile del_lock; /* lock for delete operations */
unsigned char logtmp[LOG_MAX_LINELEN];
wait_queue_head_t rd_queue;
};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
{
struct log_data *ib;
struct procdata *pd = card->proclog;
- int i;
unsigned long flags;
if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
else
pd->log_tail->next = ib; /* follows existing messages */
pd->log_tail = ib; /* new tail */
- i = pd->del_lock++; /* get lock state */
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
/* delete old entrys */
- if (!i)
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else
- break;
- } /* pd->log_head->next */
- pd->del_lock--; /* release lock level */
+ while (pd->log_head->next) {
+ if ((pd->log_head->usage_cnt <= 0) &&
+ (pd->log_head->next->usage_cnt <= 0)) {
+ ib = pd->log_head;
+ pd->log_head = pd->log_head->next;
+ kfree(ib);
+ } else {
+ break;
+ }
+ } /* pd->log_head->next */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
} /* put_log_buffer */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 89b09c51ab7c..38a5bb764c7b 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
if (arg) {
if (copy_from_user(bname, argp, sizeof(bname) - 1))
return -EFAULT;
+ bname[sizeof(bname)-1] = 0;
} else
return -EINVAL;
ret = mutex_lock_interruptible(&dev->mtx);
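
copy_from_user() moves raw bytes and never NUL-terminates, so a string fetched this way must be terminated by hand before any str* function touches it; that is all the added line does. The idiom in general form:

    char name[32];

    /* Copy at most sizeof(name) - 1 bytes; the source may lack a NUL,
     * so terminate explicitly before treating 'name' as a string.
     */
    if (copy_from_user(name, user_ptr, sizeof(name) - 1))
            return -EFAULT;
    name[sizeof(name) - 1] = '\0';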
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index c151c6daa67e..f63a110b7bcb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
char newname[10];
if (p) {
- /* Slave-Name MUST not be empty */
- if (!strlen(p + 1))
+ /* Slave-Name MUST not be empty or overflow 'newname' */
+ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
return NULL;
- strcpy(newname, p + 1);
*p = 0;
/* Master must already exist */
if (!(n = isdn_net_findif(parm)))
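
strscpy() returns the number of characters copied (excluding the terminator) or -E2BIG when the source would not fit, so a single <= 0 test rejects both an empty slave name and one that overflows the 10-byte newname buffer, which the old strlen()-then-strcpy() pair could not do. In isolation:

    char dst[10];
    ssize_t n;

    n = strscpy(dst, src, sizeof(dst));
    if (n <= 0)             /* 0: empty source, -E2BIG: would truncate */
            return NULL;
    /* dst is now a valid, NUL-terminated copy */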
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 5ecc154f6831..9bc32578a766 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -657,7 +657,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter)
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
* filled with data from the cache). If part of the data resides on the
* media, we will read later on
*/
- if (unlikely(!bio->bi_iter.bi_idx))
+ if (unlikely(!advanced_bio))
bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4e5c48f3de62..d682e89e6493 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,7 +26,7 @@
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
- int bio_iter)
+ int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
+ bio_iter, advanced_bio);
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas;
- int advanced_bio = 0;
+ bool advanced_bio = false;
int i, j = 0;
/* logic error: lba out-of-bounds. Ignore read request */
@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
- continue;
+
+ if (unlikely(!advanced_bio)) {
+ bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
+ advanced_bio = true;
+ }
+
+ goto next;
}
/* Try to read from write buffer. The address is later checked
* on the write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, p, i,
+ advanced_bio)) {
pblk_lookup_l2p_seq(pblk, &p, lba, 1);
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
- advanced_bio = 1;
+ advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
@@ -83,6 +91,7 @@ retry:
rqd->ppa_list[j++] = p;
}
+next:
if (advanced_bio)
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
@@ -282,7 +291,7 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0c5692cc2f60..67e623bd5c2d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list,
unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter);
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
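
bio_advance() consumes bytes from the front of a bio by moving bi_iter, and it must run exactly once per offset; the old code inferred "not yet advanced" from bi_iter.bi_idx being zero, which this series replaces with a flag the caller owns and threads through pblk_read_from_cache() into pblk_rb_copy_to_bio(). Condensed:

    /* Caller-owned state instead of guessing from bi_iter.bi_idx: */
    if (unlikely(!advanced_bio)) {
            bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
            advanced_bio = true;
    }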
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index ac91fd0d62c6..cbca5e51b975 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -92,7 +92,7 @@ static struct mbox_controller pcc_mbox_ctrl = {};
*/
static struct mbox_chan *get_pcc_channel(int id)
{
- if (id < 0 || id > pcc_mbox_ctrl.num_chans)
+ if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
return ERR_PTR(-ENOENT);
return &pcc_mbox_channels[id];
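
pcc_mbox_channels[] has num_chans elements, valid at indices 0 through num_chans - 1, so the old > comparison let id == num_chans read one slot past the end of the array. The corrected guard, generalized:

    /* An array of n elements is valid at indices 0 .. n - 1. */
    if (id < 0 || id >= n)
            return ERR_PTR(-ENOENT);
    return &array[id];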
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..44f4a8ac95bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
*/
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
- blk_status_t a;
- int f;
+ int a, f;
unsigned long buffers_processed = 0;
struct dm_buffer *b, *tmp;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..3acce09bba35 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1587,16 +1587,18 @@ retry:
if (likely(ic->mode == 'J')) {
if (dio->write) {
unsigned next_entry, i, pos;
- unsigned ws, we;
+ unsigned ws, we, range_sectors;
- dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
+ dio->range.n_sectors = min(dio->range.n_sectors,
+ ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors))
goto sleep;
- ic->free_sectors -= dio->range.n_sectors;
+ range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
+ ic->free_sectors -= range_sectors;
journal_section = ic->free_section;
journal_entry = ic->free_section_entry;
- next_entry = ic->free_section_entry + dio->range.n_sectors;
+ next_entry = ic->free_section_entry + range_sectors;
ic->free_section_entry = next_entry % ic->journal_section_entries;
ic->free_section += next_entry / ic->journal_section_entries;
ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
+ WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
}
static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
{
unsigned i, j, n;
struct journal_completion comp;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
comp.ic = ic;
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1952,8 @@ skip_io:
dm_bufio_write_dirty_buffers_async(ic->bufio);
+ blk_finish_plug(&plug);
+
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
@@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
+ ti->error = "Corrupted superblock, journal_sections is 0";
+ goto bad;
+ }
/* make sure that ti->max_io_len doesn't overflow */
if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
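
The bug being fixed is a unit mismatch: ic->free_sectors counts journal entries (one per logical block) while dio->range.n_sectors counts 512-byte sectors, and one block spans 1 << log2_sectors_per_block sectors. A worked conversion, assuming 4 KiB blocks (log2_sectors_per_block == 3):

    unsigned free_entries = 64;     /* journal entries available */
    unsigned log2_spb = 3;          /* 4096-byte block = 8 x 512-byte sectors */

    unsigned max_sectors = free_entries << log2_spb;   /* 512 sectors coverable */
    unsigned n_sectors = 256;       /* sectors this request wants */
    unsigned entries_used = n_sectors >> log2_spb;     /* 32 journal entries */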
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
+#define RT_FLAG_RS_SUSPENDED 5
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
if (__raid10_near_copies(layout) > 1)
return "near";
- WARN_ON(__raid10_far_copies(layout) < 2);
+ if (__raid10_far_copies(layout) > 1)
+ return "far";
- return "far";
+ return "unknown";
}
/* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (!freshest)
return 0;
- if (validate_raid_redundancy(rs)) {
- rs->ti->error = "Insufficient redundancy to activate array";
- return -EINVAL;
- }
-
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (super_validate(rs, freshest))
return -EINVAL;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
mddev_suspend(&rs->md);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (!rs->md.suspended)
+ if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_suspend(&rs->md);
rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
return r;
/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
/*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
}
/* Suspend because a resume will happen in raid_resume() */
- if (!mddev->suspended)
- mddev_suspend(mddev);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
+ mddev_suspend(mddev);
/*
* Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 11, 1},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
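
Tracking suspension in a driver-owned flag via test_and_set_bit()/test_and_clear_bit() makes each transition atomic and idempotent, instead of peeking at mddev->suspended, which the md core owns and may change underneath the target. The pattern in isolation:

    /* Suspend only on the 0 -> 1 transition ... */
    if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
            mddev_suspend(&rs->md);

    /* ... and resume only on the 1 -> 0 transition. */
    if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
            mddev_resume(&rs->md);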
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
+#include <linux/dax.h>
#define DM_MSG_PREFIX "table"
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
return false;
}
+static int device_dax_write_cache_enabled(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct dax_device *dax_dev = dev->dax_dev;
+
+ if (!dax_dev)
+ return false;
+
+ if (dax_write_cache_enabled(dax_dev))
+ return true;
+ return false;
+}
+
+static int dm_table_supports_dax_write_cache(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti,
+ device_dax_write_cache_enabled, NULL))
+ return true;
+ }
+
+ return false;
+}
+
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax_write_cache(t))
+ dax_write_cache(t->md->dax_dev, true);
+
/* Ensure that all underlying devices are non-rotational. */
if (dm_table_all_devices_attribute(t, device_is_nonrot))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
unsigned n;
- if (!fio->rs) {
- fio->rs = mempool_alloc(v->fec->rs_pool, 0);
- if (unlikely(!fio->rs)) {
- DMERR("failed to allocate RS");
- return -ENOMEM;
- }
- }
+ if (!fio->rs)
+ fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
fec_for_each_prealloc_buffer(n) {
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
}
fio->nbufs = n;
- if (!fio->output) {
+ if (!fio->output)
fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
- if (!fio->output) {
- DMERR("failed to allocate FEC page");
- return -ENOMEM;
- }
- }
-
return 0;
}
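
The GFP choices here lean on mempool semantics: mempool_alloc() with a blocking mask such as GFP_NOIO sleeps until an element returns to the pool and so cannot return NULL, which is why the error paths around the rs_pool and output_pool allocations were dead code. GFP_NOWAIT, by contrast, may fail, and the buffer loops above tolerate that. In outline:

    /* Never fails: sleeps for a pool element if the fast path is empty. */
    obj = mempool_alloc(pool, GFP_NOIO);

    /* May fail under memory pressure: caller must degrade gracefully. */
    buf = mempool_alloc(pool, GFP_NOWAIT);
    if (!buf)
            break;          /* proceed with fewer buffers */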
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..a4fa2ada6883 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
goto out;
}
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
}
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
return -ENOMEM;
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz, GFP_KERNEL);
+ &blkz, &nr_blkz, GFP_NOIO);
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
ret = blkdev_reset_zones(dev->bdev,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_KERNEL);
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
nr_blocks = block - wp_block;
ret = blkdev_issue_zeroout(zrc->dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
- dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+ dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
dmz_dev_err(zrc->dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b08bbbd4d902 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
int ret;
/* Create a new chunk work */
- cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (!cw)
goto out;
@@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = dev->bdev;
- if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE))
+ if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
/* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
- if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) {
+ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
spin_lock(&dmz->flush_lock);
bio_list_add(&dmz->flush_list, bio);
spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
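
These conversions matter because the call sites run while servicing I/O: a GFP_KERNEL or GFP_NOFS allocation may enter direct reclaim and submit new I/O to the very device being written, which can deadlock. GFP_NOIO keeps reclaim memory-only, while the radix tree init goes the other way, to GFP_KERNEL, since dmz_ctr() runs outside the I/O path. A sketch of the I/O-path case:

    struct dm_chunk_work *cw;

    /* Reclaim must not recurse into the block layer here, so drop
     * __GFP_IO (and __GFP_FS) by allocating with GFP_NOIO.
     */
    cw = kmalloc(sizeof(*cw), GFP_NOIO);
    if (!cw)
            return -ENOMEM;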
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..c99634612fc4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
static bool set_in_sync(struct mddev *mddev)
{
- WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
+ WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
if (!mddev->in_sync) {
mddev->sync_checkers++;
spin_unlock(&mddev->lock);
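
The NR_CPUS != 1 guard exists because uniprocessor builds compile spinlocks away and spin_is_locked() is then hard-wired to return 0, so the unguarded assertion fired on every UP call even when the lock discipline was correct. With the guard, the check is only evaluated where it can succeed:

    /* On UP kernels spin_is_locked() is always 0; assert only on SMP. */
    WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));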
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b50eb4ac1b82..09db03455801 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
!bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
-
-/* Maximum size of each resync request */
-#define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-
-/* for managing resync I/O pages */
-struct resync_pages {
- unsigned idx; /* for get/put page from the pool */
- void *raid_bio;
- struct page *pages[RESYNC_PAGES];
-};
-
-static inline int resync_alloc_pages(struct resync_pages *rp,
- gfp_t gfp_flags)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++) {
- rp->pages[i] = alloc_page(gfp_flags);
- if (!rp->pages[i])
- goto out_free;
- }
-
- return 0;
-
-out_free:
- while (--i >= 0)
- put_page(rp->pages[i]);
- return -ENOMEM;
-}
-
-static inline void resync_free_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- put_page(rp->pages[i]);
-}
-
-static inline void resync_get_all_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- get_page(rp->pages[i]);
-}
-
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
- unsigned idx)
-{
- if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
- return NULL;
- return rp->pages[idx];
-}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
+/* Maximum size of each resync request */
+#define RESYNC_BLOCK_SIZE (64*1024)
+#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+/* for managing resync I/O pages */
+struct resync_pages {
+ void *raid_bio;
+ struct page *pages[RESYNC_PAGES];
+};
+
+static inline int resync_alloc_pages(struct resync_pages *rp,
+ gfp_t gfp_flags)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++) {
+ rp->pages[i] = alloc_page(gfp_flags);
+ if (!rp->pages[i])
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0)
+ put_page(rp->pages[i]);
+ return -ENOMEM;
+}
+
+static inline void resync_free_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ put_page(rp->pages[i]);
+}
+
+static inline void resync_get_all_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ get_page(rp->pages[i]);
+}
+
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
+ unsigned idx)
+{
+ if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+ return NULL;
+ return rp->pages[idx];
+}
+
+/*
+ * 'struct resync_pages' stores the actual pages used for doing the resync
+ * IO, and it is per-bio, so make .bi_private point to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+ return bio->bi_private;
+}
+
+/* generally called after bio_reset() for resetting the bvec table */
+static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+ int size)
+{
+ int idx = 0;
+
+ /* initialize bvec table again */
+ do {
+ struct page *page = resync_fetch_page(rp, idx);
+ int len = min_t(int, size, PAGE_SIZE);
+
+ /*
+ * won't fail because the vec table is big
+ * enough to hold all these pages
+ */
+ bio_add_page(bio, page, len, 0);
+ size -= len;
+ } while (idx++ < RESYNC_PAGES && size > 0);
+}
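
These helpers are shared by textual inclusion (raid1.c and raid10.c each gain #include "raid1-10.c" below), since they are static helpers rather than exported symbols. A hedged sketch of the lifecycle they implement; in the drivers the resync_pages live in mempool-backed r1buf/r10buf allocations, not on the stack:

    struct resync_pages rp;
    int idx, ret;

    ret = resync_alloc_pages(&rp, GFP_NOIO);    /* grab RESYNC_PAGES pages */
    if (ret)
            return ret;

    bio->bi_private = &rp;                      /* get_resync_pages(bio) finds it */
    md_bio_reset_resync_pages(bio, &rp, RESYNC_BLOCK_SIZE);

    for (idx = 0; idx < RESYNC_PAGES; idx++)
            resync_fetch_page(&rp, idx);        /* per-page access */

    resync_free_pages(&rp);                     /* drop the references */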
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f50958ded9f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#define raid1_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r1_bio;
bio->bi_private = rp;
}
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- /* we release behind master bio when all write are done */
- if (r1_bio->behind_master_bio == bio)
- to_put = NULL;
-
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
struct bio *bio)
{
int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
if (!behind_bio)
- goto fail;
+ return;
/* discard op, we don't support writezero/writesame yet */
- if (!bio_has_data(bio))
+ if (!bio_has_data(bio)) {
+ behind_bio->bi_iter.bi_size = size;
goto skip_copy;
+ }
while (i < vcnt && size) {
struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
r1_bio->behind_master_bio = behind_bio;;
set_bit(R1BIO_BehindIO, &r1_bio->state);
- return behind_bio;
+ return;
free_pages:
pr_debug("%dB behind alloc failed, doing sync I/O\n",
bio->bi_iter.bi_size);
bio_free_pages(behind_bio);
-fail:
- return behind_bio;
+ bio_put(behind_bio);
}
struct raid1_plug_cb {
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
!waitqueue_active(&bitmap->behind_wait)) {
- mbio = alloc_behind_master_bio(r1_bio, bio);
+ alloc_behind_master_bio(r1_bio, bio);
}
bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (!mbio) {
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
- }
+ if (r1_bio->behind_master_bio)
+ mbio = bio_clone_fast(r1_bio->behind_master_bio,
+ GFP_NOIO, mddev->bio_set);
+ else
+ mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
if (r1_bio->behind_master_bio) {
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
/* Fix variable parts of all bios */
vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
- int j;
- int size;
blk_status_t status;
- struct bio_vec *bi;
struct bio *b = r1_bio->bios[i];
struct resync_pages *rp = get_resync_pages(b);
if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
status = b->bi_status;
bio_reset(b);
b->bi_status = status;
- b->bi_vcnt = vcnt;
- b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
rp->raid_bio = r1_bio;
b->bi_private = rp;
- size = b->bi_iter.bi_size;
- bio_for_each_segment_all(bi, b, j) {
- bi->bv_offset = 0;
- if (size > PAGE_SIZE)
- bi->bv_len = PAGE_SIZE;
- else
- bi->bv_len = size;
- size -= PAGE_SIZE;
- }
+ /* initialize bvec table again */
+ md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
}
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio = bio_clone_fast(r1_bio->behind_master_bio,
GFP_NOIO,
mddev->bio_set);
- /* We really need a _all clone */
- wbio->bi_iter = (struct bvec_iter){ 0 };
} else {
wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */
int idx = sector_to_idx(sector_nr);
+ int page_idx = 0;
if (!conf->r1buf_pool)
if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
rp = get_resync_pages(bio);
if (bio->bi_end_io) {
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
nr_sectors += len>>9;
sector_nr += len>>9;
sync_blocks -= (len>>9);
- } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r1_bio->sectors = nr_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
#define raid10_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r10_bio;
bio->bi_private = rp;
if (rbio) {
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
rp = get_resync_pages(tbio);
bio_reset(tbio);
- tbio->bi_vcnt = vcnt;
- tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
+ md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
+
rp->raid_bio = r10_bio;
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
sector_t chunk_mask = conf->geo.chunk_mask;
+ int page_idx = 0;
if (!conf->r10buf_pool)
if (init_resync(conf))
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
break;
for (bio= biolist ; bio ; bio=bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big enough
* to hold all these pages
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
nr_sectors += len>>9;
sector_nr += len>>9;
- } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r10_bio->sectors = nr_sectors;
while (biolist) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index aeeb8d6854e2..0fc2748aaf95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
if (bitmap_end)
@@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = bi2;
}
@@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
}
@@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+
+ async_tx_issue_pending_all();
blk_finish_plug(&plug);
pr_debug("--- raid5worker inactive\n");
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index bf45977b2823..d596b601ff42 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(cec_transmit_done);
void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status)
{
- switch (status) {
+ switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
case CEC_TX_STATUS_OK:
cec_transmit_done(adap, status, 0, 0, 0, 0);
return;
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 74dc1c32080e..08b619d0ea1e 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -87,6 +87,9 @@ EXPORT_SYMBOL_GPL(cec_notifier_put);
void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
{
+ if (n == NULL)
+ return;
+
mutex_lock(&n->lock);
n->phys_addr = pa;
if (n->callback)
@@ -100,6 +103,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
{
u16 pa = CEC_PHYS_ADDR_INVALID;
+ if (n == NULL)
+ return;
+
if (edid && edid->extensions)
pa = cec_get_edid_phys_addr((const u8 *)edid,
EDID_LENGTH * (edid->extensions + 1), NULL);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index af694f2066a2..17970cdd55fa 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -349,7 +349,8 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
/* read the buffer size from the CAM */
if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0)
return ret;
- if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0)
+ ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ);
+ if (ret != 0)
return ret;
if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2)
return -EIO;
@@ -644,72 +645,101 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
}
buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer);
- if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) {
+ if (buf_free < (ca->slot_info[slot].link_buf_size +
+ DVB_RINGBUFFER_PKTHDRSIZE)) {
status = -EAGAIN;
goto exit;
}
}
- /* check if there is data available */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
- goto exit;
- if (!(status & STATUSREG_DA)) {
- /* no data */
- status = 0;
- goto exit;
- }
-
- /* read the amount of data */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0)
- goto exit;
- bytes_read = status << 8;
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0)
- goto exit;
- bytes_read |= status;
+ if (ca->pub->read_data &&
+ (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) {
+ if (ebuf == NULL)
+ status = ca->pub->read_data(ca->pub, slot, buf,
+ sizeof(buf));
+ else
+ status = ca->pub->read_data(ca->pub, slot, buf, ecount);
+ if (status < 0)
+ return status;
+ bytes_read = status;
+ if (status == 0)
+ goto exit;
+ } else {
- /* check it will fit */
- if (ebuf == NULL) {
- if (bytes_read > ca->slot_info[slot].link_buf_size) {
- pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
- ca->dvbdev->adapter->num, bytes_read,
- ca->slot_info[slot].link_buf_size);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- status = -EIO;
+ /* check if there is data available */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_STATUS);
+ if (status < 0)
goto exit;
- }
- if (bytes_read < 2) {
- pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- status = -EIO;
+ if (!(status & STATUSREG_DA)) {
+ /* no data */
+ status = 0;
goto exit;
}
- } else {
- if (bytes_read > ecount) {
- pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
- ca->dvbdev->adapter->num);
- status = -EIO;
+
+ /* read the amount of data */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_SIZE_HIGH);
+ if (status < 0)
+ goto exit;
+ bytes_read = status << 8;
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_SIZE_LOW);
+ if (status < 0)
goto exit;
+ bytes_read |= status;
+
+ /* check it will fit */
+ if (ebuf == NULL) {
+ if (bytes_read > ca->slot_info[slot].link_buf_size) {
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
+ ca->dvbdev->adapter->num, bytes_read,
+ ca->slot_info[slot].link_buf_size);
+ ca->slot_info[slot].slot_state =
+ DVB_CA_SLOTSTATE_LINKINIT;
+ status = -EIO;
+ goto exit;
+ }
+ if (bytes_read < 2) {
+ pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
+ ca->dvbdev->adapter->num);
+ ca->slot_info[slot].slot_state =
+ DVB_CA_SLOTSTATE_LINKINIT;
+ status = -EIO;
+ goto exit;
+ }
+ } else {
+ if (bytes_read > ecount) {
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
+ ca->dvbdev->adapter->num);
+ status = -EIO;
+ goto exit;
+ }
}
- }
- /* fill the buffer */
- for (i = 0; i < bytes_read; i++) {
- /* read byte and check */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0)
- goto exit;
+ /* fill the buffer */
+ for (i = 0; i < bytes_read; i++) {
+ /* read byte and check */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_DATA);
+ if (status < 0)
+ goto exit;
- /* OK, store it in the buffer */
- buf[i] = status;
- }
+ /* OK, store it in the buffer */
+ buf[i] = status;
+ }
- /* check for read error (RE should now be 0) */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
- goto exit;
- if (status & STATUSREG_RE) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- status = -EIO;
- goto exit;
+ /* check for read error (RE should now be 0) */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_STATUS);
+ if (status < 0)
+ goto exit;
+ if (status & STATUSREG_RE) {
+ ca->slot_info[slot].slot_state =
+ DVB_CA_SLOTSTATE_LINKINIT;
+ status = -EIO;
+ goto exit;
+ }
}
/* OK, add it to the receive buffer, or copy into external buffer if supplied */
@@ -762,6 +792,10 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
if (bytes_write > ca->slot_info[slot].link_buf_size)
return -EINVAL;
+ if (ca->pub->write_data &&
+ (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT))
+ return ca->pub->write_data(ca->pub, slot, buf, bytes_write);
+
/* it is possible we are dealing with a single buffer implementation,
thus if there is data available for read or if there is even a read
already in progress, we do nothing but awake the kernel thread to
@@ -1176,7 +1210,8 @@ static int dvb_ca_en50221_thread(void *data)
pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
+ ca->slot_info[slot].slot_state =
+ DVB_CA_SLOTSTATE_UNINITIALISED;
dvb_ca_en50221_thread_update_delay(ca);
break;
}
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h
index 1e4bbbd34d91..82617bac0875 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.h
+++ b/drivers/media/dvb-core/dvb_ca_en50221.h
@@ -41,6 +41,8 @@
* @write_attribute_mem: function for writing attribute memory on the CAM
* @read_cam_control: function for reading the control interface on the CAM
* @write_cam_control: function for reading the control interface on the CAM
+ * @read_data: function for reading data (block mode)
+ * @write_data: function for writing data (block mode)
* @slot_reset: function to reset the CAM slot
* @slot_shutdown: function to shutdown a CAM slot
* @slot_ts_enable: function to enable the Transport Stream on a CAM slot
@@ -66,6 +68,11 @@ struct dvb_ca_en50221 {
int (*write_cam_control)(struct dvb_ca_en50221 *ca,
int slot, u8 address, u8 value);
+ int (*read_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+ int (*write_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+
int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
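
The two new hooks let a host driver take over block-mode transfers instead of the byte-at-a-time CTRLIF register protocol the core otherwise drives; when they are present and the slot is past link init, the core calls them directly, as the dvb_ca_en50221.c hunks above show. A skeletal wiring, with hypothetical hardware helpers:

    /* Hypothetical driver glue: hand block transfers to hardware.
     * read_data returns the byte count read (0 if none), write_data
     * the count written; both return a negative errno on failure.
     */
    static int my_read_data(struct dvb_ca_en50221 *ca, int slot,
                            u8 *ebuf, int ecount)
    {
            return my_hw_read(slot, ebuf, ecount);
    }

    static int my_write_data(struct dvb_ca_en50221 *ca, int slot,
                             u8 *ebuf, int ecount)
    {
            return my_hw_write(slot, ebuf, ecount);
    }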
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 08f67d60a7d9..12bff778c97f 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3279,7 +3279,10 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
else if (priv->state == STATE_ACTIVE_TC)
cxd2841er_read_status_tc(fe, &status);
- cxd2841er_read_signal_strength(fe);
+ if (priv->state == STATE_ACTIVE_TC || priv->state == STATE_ACTIVE_S)
+ cxd2841er_read_signal_strength(fe);
+ else
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
if (status & FE_HAS_LOCK) {
cxd2841er_read_snr(fe);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 4442e478db72..cd69e187ba7a 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -307,7 +307,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
* \def DRX_UNKNOWN
* \brief Generic UNKNOWN value for DRX enumerated types.
*
-* Used to indicate that the parameter value is unknown or not yet initalized.
+* Used to indicate that the parameter value is unknown or not yet initialized.
*/
#ifndef DRX_UNKNOWN
#define DRX_UNKNOWN (254)
@@ -450,19 +450,6 @@ MACROS
((u8)((((u16)x)>>8)&0xFF))
/**
-* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
-*/
-#define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x))
-
-/**
-* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
-*/
-#define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \
- ((s32) \
- (((u32) x) | 0xFF000000)) : \
- ((s32) x))
-
-/**
* \brief Macro to convert 16 bit register value to a s32
*/
#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
index ef3021e964be..cb486e879fdd 100644
--- a/drivers/media/dvb-frontends/lnbh25.c
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -76,8 +76,8 @@ static int lnbh25_read_vmon(struct lnbh25_priv *priv)
return ret;
}
}
- print_hex_dump_bytes("lnbh25_read_vmon: ",
- DUMP_PREFIX_OFFSET, status, sizeof(status));
+ dev_dbg(&priv->i2c->dev, "%s(): %*ph\n",
+ __func__, (int) sizeof(status), status);
if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) {
dev_err(&priv->i2c->dev,
"%s(): voltage in failure state, status reg 0x%x\n",
@@ -178,7 +178,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
fe->ops.release_sec = lnbh25_release;
fe->ops.set_voltage = lnbh25_set_voltage;
- dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
+ dev_info(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
__func__, priv->i2c_address);
return fe;
}
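
The replacement logging uses the kernel's %*ph printk extension, which prints a small buffer (up to 64 bytes) as space-separated hex, taking an int field width for the length; the open-coded print_hex_dump_bytes() for two status bytes becomes one debug line:

    u8 status[2];

    /* %*ph consumes (int)len then the buffer pointer, e.g. "0a 1f". */
    dev_dbg(&client->dev, "status: %*ph\n", (int)sizeof(status), status);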
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index e726c2e00460..8ac0f598978d 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -25,6 +25,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
+#include "dvb_math.h"
+
#include "stv0367.h"
#include "stv0367_defs.h"
#include "stv0367_regs.h"
@@ -1437,7 +1439,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
+static u32 stv0367ter_snr_readreg(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
u32 snru32 = 0;
@@ -1453,10 +1455,16 @@ static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
cpt++;
}
-
snru32 /= 10;/*average on 10 values*/
- *snr = snru32 / 1000;
+ return snru32;
+}
+
+static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ u32 snrval = stv0367ter_snr_readreg(fe);
+
+ *snr = snrval / 1000;
return 0;
}
@@ -1501,7 +1509,8 @@ static int stv0367ter_read_status(struct dvb_frontend *fe,
*status = 0;
if (stv0367_readbits(state, F367TER_LK)) {
- *status |= FE_HAS_LOCK;
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI
+ | FE_HAS_SYNC | FE_HAS_LOCK;
dprintk("%s: stv0367 has locked\n", __func__);
}
@@ -2149,6 +2158,18 @@ static int stv0367cab_read_status(struct dvb_frontend *fe,
*status = 0;
+ if (state->cab_state->state > FE_CAB_NOSIGNAL)
+ *status |= FE_HAS_SIGNAL;
+
+ if (state->cab_state->state > FE_CAB_NOCARRIER)
+ *status |= FE_HAS_CARRIER;
+
+ if (state->cab_state->state >= FE_CAB_DEMODOK)
+ *status |= FE_HAS_VITERBI;
+
+ if (state->cab_state->state >= FE_CAB_DATAOK)
+ *status |= FE_HAS_SYNC;
+
if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ?
state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) {
*status |= FE_HAS_LOCK;
@@ -2702,51 +2723,61 @@ static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
}
-static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr)
+static int stv0367cab_snr_power(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
- u32 noisepercentage;
enum stv0367cab_mod QAMSize;
- u32 regval = 0, temp = 0;
- int power, i;
QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
switch (QAMSize) {
case FE_CAB_MOD_QAM4:
- power = 21904;
- break;
+ return 21904;
case FE_CAB_MOD_QAM16:
- power = 20480;
- break;
+ return 20480;
case FE_CAB_MOD_QAM32:
- power = 23040;
- break;
+ return 23040;
case FE_CAB_MOD_QAM64:
- power = 21504;
- break;
+ return 21504;
case FE_CAB_MOD_QAM128:
- power = 23616;
- break;
+ return 23616;
case FE_CAB_MOD_QAM256:
- power = 21760;
- break;
- case FE_CAB_MOD_QAM512:
- power = 1;
- break;
+ return 21760;
case FE_CAB_MOD_QAM1024:
- power = 21280;
- break;
+ return 21280;
default:
- power = 1;
break;
}
+ return 1;
+}
+
+static int stv0367cab_snr_readreg(struct dvb_frontend *fe, int avgdiv)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ u32 regval = 0;
+ int i;
+
for (i = 0; i < 10; i++) {
regval += (stv0367_readbits(state, F367CAB_SNR_LO)
+ 256 * stv0367_readbits(state, F367CAB_SNR_HI));
}
- regval /= 10; /*for average over 10 times in for loop above*/
+ if (avgdiv)
+ regval /= 10;
+
+ return regval;
+}
+
+static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ u32 noisepercentage;
+ u32 regval = 0, temp = 0;
+ int power;
+
+ power = stv0367cab_snr_power(fe);
+ regval = stv0367cab_snr_readreg(fe, 1);
+
if (regval != 0) {
temp = power
* (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER)));
@@ -2980,21 +3011,117 @@ static int stv0367ddb_set_frontend(struct dvb_frontend *fe)
return -EINVAL;
}
+static void stv0367ddb_read_signal_strength(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ s32 signalstrength;
+
+ switch (state->activedemod) {
+ case demod_cab:
+ signalstrength = stv0367cab_get_rf_lvl(state) * 1000;
+ break;
+ default:
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].uvalue = signalstrength;
+}
+
+static void stv0367ddb_read_snr(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ int cab_pwr;
+ u32 regval, tmpval, snrval = 0;
+
+ switch (state->activedemod) {
+ case demod_ter:
+ snrval = stv0367ter_snr_readreg(fe);
+ break;
+ case demod_cab:
+ cab_pwr = stv0367cab_snr_power(fe);
+ regval = stv0367cab_snr_readreg(fe, 0);
+
+ /* prevent division by zero */
+ if (!regval) {
+ snrval = 0;
+ break;
+ }
+
+ tmpval = (cab_pwr * 320) / regval;
+ snrval = ((tmpval != 0) ? (intlog2(tmpval) / 5581) : 0);
+ break;
+ default:
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].uvalue = snrval;
+}
+
+static void stv0367ddb_read_ucblocks(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 ucblocks = 0;
+
+ switch (state->activedemod) {
+ case demod_ter:
+ stv0367ter_read_ucblocks(fe, &ucblocks);
+ break;
+ case demod_cab:
+ stv0367cab_read_ucblcks(fe, &ucblocks);
+ break;
+ default:
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_error.stat[0].uvalue = ucblocks;
+}
+
static int stv0367ddb_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ int ret;
switch (state->activedemod) {
case demod_ter:
- return stv0367ter_read_status(fe, status);
+ ret = stv0367ter_read_status(fe, status);
+ break;
case demod_cab:
- return stv0367cab_read_status(fe, status);
- default:
+ ret = stv0367cab_read_status(fe, status);
break;
+ default:
+ return 0;
}
- return -EINVAL;
+ /* stop and report on *_read_status failure */
+ if (ret)
+ return ret;
+
+ stv0367ddb_read_signal_strength(fe);
+
+ /* read carrier/noise when a carrier is detected */
+ if (*status & FE_HAS_CARRIER)
+ stv0367ddb_read_snr(fe);
+ else
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ /* read uncorrected blocks on FE_HAS_LOCK */
+ if (*status & FE_HAS_LOCK)
+ stv0367ddb_read_ucblocks(fe);
+ else
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return 0;
}
static int stv0367ddb_get_frontend(struct dvb_frontend *fe,
@@ -3035,6 +3162,7 @@ static int stv0367ddb_sleep(struct dvb_frontend *fe)
static int stv0367ddb_init(struct stv0367_state *state)
{
struct stv0367ter_state *ter_state = state->ter_state;
+ struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
stv0367_writereg(state, R367TER_TOPCTRL, 0x10);
@@ -3109,6 +3237,13 @@ static int stv0367ddb_init(struct stv0367_state *state)
ter_state->first_lock = 0;
ter_state->unlock_counter = 2;
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.len = 1;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
return 0;
}
@@ -3126,15 +3261,12 @@ static const struct dvb_frontend_ops stv0367ddb_ops = {
0x400 |/* FE_CAN_QAM_4 */
FE_CAN_QAM_16 | FE_CAN_QAM_32 |
FE_CAN_QAM_64 | FE_CAN_QAM_128 |
- FE_CAN_QAM_256 | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
/* DVB-T */
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
- FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER |
- FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_RECOVER | FE_CAN_INVERSION_AUTO |
FE_CAN_MUTE_TS
},
.release = stv0367_release,
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 6e313d5243a0..f39f5179dd95 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1496,7 +1496,6 @@ MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
static const struct dev_pm_ops et8ek8_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume)
};
-MODULE_DEVICE_TABLE(of, et8ek8_of_table);
static struct i2c_driver et8ek8_i2c_driver = {
.driver = {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 9da4bf4f2c7a..7b79a7498751 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -659,7 +659,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std = decoder->norm;
u8 reg;
- int pos=0;
+ int pos = 0;
if (std == V4L2_STD_ALL) {
dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
@@ -669,33 +669,30 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
line += 3;
}
- if (line<6||line>27)
+ if (line < 6 || line > 27)
return 0;
- while (regs->reg != (u16)-1 ) {
+ while (regs->reg != (u16)-1) {
if ((type & regs->type.vbi_type) &&
- (line>=regs->type.ini_line) &&
- (line<=regs->type.end_line)) {
- type=regs->type.vbi_type;
+ (line >= regs->type.ini_line) &&
+ (line <= regs->type.end_line))
break;
- }
regs++;
pos++;
}
+
if (regs->reg == (u16)-1)
return 0;
- type=pos | (flags & 0xf0);
- reg=((line-6)<<1)+TVP5150_LINE_MODE_INI;
+ type = pos | (flags & 0xf0);
+ reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
- if (fields&1) {
+ if (fields & 1)
tvp5150_write(sd, reg, type);
- }
- if (fields&2) {
- tvp5150_write(sd, reg+1, type);
- }
+ if (fields & 2)
+ tvp5150_write(sd, reg + 1, type);
return type;
}
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 9420479bee9a..cd1723e79a07 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -17,6 +17,8 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -114,6 +116,19 @@ static int i2c_write_reg(struct i2c_adapter *adap, u8 adr,
return i2c_write(adap, adr, msg, 2);
}
+static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr)
+{
+ u32 val = ddbreadl(adr);
+
+ /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */
+ if (val == ~0) {
+ dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr);
+ return 0;
+ }
+
+ return val;
+}
+
static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
{
struct ddb *dev = i2c->dev;
@@ -124,10 +139,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
if (stat == 0) {
- printk(KERN_ERR "I2C timeout\n");
+ dev_err(&dev->pdev->dev, "I2C timeout\n");
{ /* MSI debugging*/
u32 istat = ddbreadl(INTERRUPT_STATUS);
- printk(KERN_ERR "IRS %08x\n", istat);
+ dev_err(&dev->pdev->dev, "IRS %08x\n", istat);
ddbwritel(istat, INTERRUPT_ACK);
}
return -EIO;
@@ -533,7 +548,7 @@ static u32 ddb_input_avail(struct ddb_input *input)
off = (stat & 0x7ff) << 7;
if (ctrl & 4) {
- printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl);
+ dev_err(&dev->pdev->dev, "IA %d %d %08x\n", idx, off, ctrl);
ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
return 0;
}
@@ -611,6 +626,7 @@ static int demod_attach_drxk(struct ddb_input *input)
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct dvb_frontend *fe;
struct drxk_config config;
+ struct device *dev = &input->port->dev->pdev->dev;
memset(&config, 0, sizeof(config));
config.microcode_name = "drxk_a3.mc";
@@ -619,7 +635,7 @@ static int demod_attach_drxk(struct ddb_input *input)
fe = input->fe = dvb_attach(drxk_attach, &config, i2c);
if (!input->fe) {
- printk(KERN_ERR "No DRXK found!\n");
+ dev_err(dev, "No DRXK found!\n");
return -ENODEV;
}
fe->sec_priv = input;
@@ -632,12 +648,13 @@ static int tuner_attach_tda18271(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct dvb_frontend *fe;
+ struct device *dev = &input->port->dev->pdev->dev;
if (input->fe->ops.i2c_gate_ctrl)
input->fe->ops.i2c_gate_ctrl(input->fe, 1);
fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
if (!fe) {
- printk(KERN_ERR "No TDA18271 found!\n");
+ dev_err(dev, "No TDA18271 found!\n");
return -ENODEV;
}
if (input->fe->ops.i2c_gate_ctrl)
@@ -670,13 +687,14 @@ static struct stv0367_config ddb_stv0367_config[] = {
static int demod_attach_stv0367(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct device *dev = &input->port->dev->pdev->dev;
/* attach frontend */
input->fe = dvb_attach(stv0367ddb_attach,
&ddb_stv0367_config[(input->nr & 1)], i2c);
if (!input->fe) {
- printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n");
+ dev_err(dev, "stv0367ddb_attach failed (not found?)\n");
return -ENODEV;
}
@@ -690,17 +708,19 @@ static int demod_attach_stv0367(struct ddb_input *input)
static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
{
struct i2c_adapter *adapter = &input->port->i2c->adap;
+ struct device *dev = &input->port->dev->pdev->dev;
+
u8 tda_id[2];
u8 subaddr = 0x00;
- printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n");
+ dev_dbg(dev, "stv0367-tda18212 tuner ping\n");
if (input->fe->ops.i2c_gate_ctrl)
input->fe->ops.i2c_gate_ctrl(input->fe, 1);
if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
- printk(KERN_DEBUG "tda18212 ping 1 fail\n");
+ dev_dbg(dev, "tda18212 ping 1 fail\n");
if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
- printk(KERN_DEBUG "tda18212 ping 2 fail\n");
+ dev_warn(dev, "tda18212 ping failed, expect problems\n");
if (input->fe->ops.i2c_gate_ctrl)
input->fe->ops.i2c_gate_ctrl(input->fe, 0);
@@ -711,6 +731,7 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct device *dev = &input->port->dev->pdev->dev;
struct cxd2841er_config cfg;
/* the cxd2841er driver expects 8bit/shifted I2C addresses */
@@ -728,7 +749,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c);
if (!input->fe) {
- printk(KERN_ERR "No Sony CXD28xx found!\n");
+ dev_err(dev, "No Sony CXD28xx found!\n");
return -ENODEV;
}
@@ -742,6 +763,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
{
struct i2c_adapter *adapter = &input->port->i2c->adap;
+ struct device *dev = &input->port->dev->pdev->dev;
struct i2c_client *client;
struct tda18212_config config = {
.fe = input->fe,
@@ -786,7 +808,7 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
return 0;
err:
- printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n");
+ dev_warn(dev, "TDA18212 tuner not found. Device is not fully operational.\n");
return -ENODEV;
}
@@ -847,19 +869,20 @@ static struct stv6110x_config stv6110b = {
static int demod_attach_stv0900(struct ddb_input *input, int type)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct device *dev = &input->port->dev->pdev->dev;
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
input->fe = dvb_attach(stv090x_attach, feconf, i2c,
(input->nr & 1) ? STV090x_DEMODULATOR_1
: STV090x_DEMODULATOR_0);
if (!input->fe) {
- printk(KERN_ERR "No STV0900 found!\n");
+ dev_err(dev, "No STV0900 found!\n");
return -ENODEV;
}
if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
0, (input->nr & 1) ?
(0x09 - type) : (0x0b - type))) {
- printk(KERN_ERR "No LNBH24 found!\n");
+ dev_err(dev, "No LNBH24 found!\n");
return -ENODEV;
}
return 0;
@@ -868,6 +891,7 @@ static int demod_attach_stv0900(struct ddb_input *input, int type)
static int tuner_attach_stv6110(struct ddb_input *input, int type)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct device *dev = &input->port->dev->pdev->dev;
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
struct stv6110x_config *tunerconf = (input->nr & 1) ?
&stv6110b : &stv6110a;
@@ -875,10 +899,10 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type)
ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
if (!ctl) {
- printk(KERN_ERR "No STV6110X found!\n");
+ dev_err(dev, "No STV6110X found!\n");
return -ENODEV;
}
- printk(KERN_INFO "attach tuner input %d adr %02x\n",
+ dev_info(dev, "attach tuner input %d adr %02x\n",
input->nr, tunerconf->addr);
feconf->tuner_init = ctl->tuner_init;
@@ -1009,13 +1033,14 @@ static int dvb_input_attach(struct ddb_input *input)
struct ddb_port *port = input->port;
struct dvb_adapter *adap = &input->adap;
struct dvb_demux *dvbdemux = &input->demux;
+ struct device *dev = &input->port->dev->pdev->dev;
int sony_osc24 = 0, sony_tspar = 0;
ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
&input->port->dev->pdev->dev,
adapter_nr);
if (ret < 0) {
- printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n");
+ dev_err(dev, "Could not register adapter. Check if you enabled enough adapters in dvb-core!\n");
return ret;
}
input->attached = 1;
@@ -1241,9 +1266,9 @@ static void input_tasklet(unsigned long data)
if (input->port->class == DDB_PORT_TUNER) {
if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
- printk(KERN_ERR "Overflow input %d\n", input->nr);
+ dev_err(&dev->pdev->dev, "Overflow input %d\n", input->nr);
while (input->cbuf != ((input->stat >> 11) & 0x1f)
- || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) {
+ || (4 & safe_ddbreadl(dev, DMA_BUFFER_CONTROL(input->nr)))) {
dvb_dmx_swfilter_packets(&input->demux,
input->vbuf[input->cbuf],
input->dma_buf_size / 188);
@@ -1280,6 +1305,7 @@ static struct cxd2099_cfg cxd_cfg = {
.adr = 0x40,
.polarity = 1,
.clock_mode = 1,
+ .max_i2c = 512,
};
static int ddb_ci_attach(struct ddb_port *port)
@@ -1310,6 +1336,7 @@ static int ddb_ci_attach(struct ddb_port *port)
static int ddb_port_attach(struct ddb_port *port)
{
+ struct device *dev = &port->dev->pdev->dev;
int ret = 0;
switch (port->class) {
@@ -1326,7 +1353,7 @@ static int ddb_port_attach(struct ddb_port *port)
break;
}
if (ret < 0)
- printk(KERN_ERR "port_attach on port %d failed\n", port->nr);
+ dev_err(dev, "port_attach on port %d failed\n", port->nr);
return ret;
}
@@ -1377,6 +1404,7 @@ static void ddb_ports_detach(struct ddb *dev)
static int init_xo2(struct ddb_port *port)
{
struct i2c_adapter *i2c = &port->i2c->adap;
+ struct device *dev = &port->dev->pdev->dev;
u8 val, data[2];
int res;
@@ -1385,7 +1413,7 @@ static int init_xo2(struct ddb_port *port)
return res;
if (data[0] != 0x01) {
- pr_info("Port %d: invalid XO2\n", port->nr);
+ dev_info(dev, "Port %d: invalid XO2\n", port->nr);
return -1;
}
@@ -1511,7 +1539,7 @@ static void ddb_port_probe(struct ddb_port *port)
port->class = DDB_PORT_CI;
ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
} else if (port_has_xo2(port, &xo2_type, &xo2_id)) {
- printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n",
+ dev_dbg(&dev->pdev->dev, "Port %d (TAB %d): XO2 type: %d, id: %d\n",
port->nr, port->nr+1, xo2_type, xo2_id);
ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
@@ -1556,10 +1584,10 @@ static void ddb_port_probe(struct ddb_port *port)
}
break;
case DDB_XO2_TYPE_CI:
- printk(KERN_INFO "DuoFlex CI modules not supported\n");
+ dev_info(&dev->pdev->dev, "DuoFlex CI modules not supported\n");
break;
default:
- printk(KERN_INFO "Unknown XO2 DuoFlex module\n");
+ dev_info(&dev->pdev->dev, "Unknown XO2 DuoFlex module\n");
break;
}
} else if (port_has_cxd28xx(port, &cxd_id)) {
@@ -1611,7 +1639,7 @@ static void ddb_port_probe(struct ddb_port *port)
ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
}
- printk(KERN_INFO "Port %d (TAB %d): %s\n",
+ dev_info(&dev->pdev->dev, "Port %d (TAB %d): %s\n",
port->nr, port->nr+1, modname);
}
@@ -1765,7 +1793,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
wbuf += 4;
wlen -= 4;
ddbwritel(data, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
+ while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
;
}
@@ -1785,7 +1813,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
if (shift)
data <<= shift;
ddbwritel(data, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
+ while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
;
if (!rlen) {
@@ -1797,7 +1825,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
while (rlen > 4) {
ddbwritel(0xffffffff, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
+ while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
;
data = ddbreadl(SPI_DATA);
*(u32 *) rbuf = swab32(data);
@@ -1806,7 +1834,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
}
ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
ddbwritel(0xffffffff, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
+ while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
;
data = ddbreadl(SPI_DATA);
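The SPI busy-wait loops above still spin unbounded if the busy bit never clears; safe_ddbreadl() only turns an all-ones read from a dead device into 0. A bounded variant is one possible hardening, shown here as a sketch only (not part of the patch; the timeout value is arbitrary):

	/* assumes #include <linux/delay.h>; polls the same SPI busy bit */
	static int ddb_spi_wait_idle(struct ddb *dev)
	{
		unsigned int timeout = 1000;

		while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) {
			if (!--timeout)
				return -ETIMEDOUT;
			udelay(1);
		}
		return 0;
	}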
@@ -1993,7 +2021,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
dev->info = (struct ddb_info *) id->driver_data;
- printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name);
+ dev_info(&pdev->dev, "Detected %s\n", dev->info->name);
dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -2001,13 +2029,13 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
stat = -ENOMEM;
goto fail;
}
- printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
+ dev_info(&pdev->dev, "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
#ifdef CONFIG_PCI_MSI
if (pci_msi_enabled())
stat = pci_enable_msi(dev->pdev);
if (stat) {
- printk(KERN_INFO ": MSI not available.\n");
+ dev_info(&pdev->dev, "MSI not available.\n");
} else {
irq_flag = 0;
dev->msi = 1;
@@ -2040,7 +2068,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail1;
ddb_ports_init(dev);
if (ddb_buffers_alloc(dev) < 0) {
- printk(KERN_INFO ": Could not allocate buffer memory\n");
+ dev_err(&pdev->dev, "Could not allocate buffer memory\n");
goto fail2;
}
if (ddb_ports_attach(dev) < 0)
@@ -2050,19 +2078,19 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fail3:
ddb_ports_detach(dev);
- printk(KERN_ERR "fail3\n");
+ dev_err(&pdev->dev, "fail3\n");
ddb_ports_release(dev);
fail2:
- printk(KERN_ERR "fail2\n");
+ dev_err(&pdev->dev, "fail2\n");
ddb_buffers_free(dev);
fail1:
- printk(KERN_ERR "fail1\n");
+ dev_err(&pdev->dev, "fail1\n");
if (dev->msi)
pci_disable_msi(dev->pdev);
if (stat == 0)
free_irq(dev->pdev->irq, dev);
fail:
- printk(KERN_ERR "fail\n");
+ dev_err(&pdev->dev, "fail\n");
ddb_unmap(dev);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
@@ -2242,7 +2270,7 @@ static __init int module_init_ddbridge(void)
{
int ret;
- printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
+ pr_info("Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
ret = ddb_class_create();
if (ret < 0)
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index ce69e648b663..8c92cb7f7e72 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -336,9 +336,9 @@ int ngene_command(struct ngene *dev, struct ngene_command *com)
{
int result;
- down(&dev->cmd_mutex);
+ mutex_lock(&dev->cmd_mutex);
result = ngene_command_mutex(dev, com);
- up(&dev->cmd_mutex);
+ mutex_unlock(&dev->cmd_mutex);
return result;
}
@@ -560,7 +560,6 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700);
u16 BsSDO = 0x9B00;
- down(&dev->stream_mutex);
memset(&com, 0, sizeof(com));
com.cmd.hdr.Opcode = CMD_CONTROL;
com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2;
@@ -586,17 +585,13 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
chan->State = KSSTATE_ACQUIRE;
chan->HWState = HWSTATE_STOP;
spin_unlock_irq(&chan->state_lock);
- if (ngene_command(dev, &com) < 0) {
- up(&dev->stream_mutex);
+ if (ngene_command(dev, &com) < 0)
return -1;
- }
/* clear_buffers(chan); */
flush_buffers(chan);
- up(&dev->stream_mutex);
return 0;
}
spin_unlock_irq(&chan->state_lock);
- up(&dev->stream_mutex);
return 0;
}
@@ -692,11 +687,9 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
chan->HWState = HWSTATE_STARTUP;
spin_unlock_irq(&chan->state_lock);
- if (ngene_command(dev, &com) < 0) {
- up(&dev->stream_mutex);
+ if (ngene_command(dev, &com) < 0)
return -1;
- }
- up(&dev->stream_mutex);
+
return 0;
}
@@ -750,8 +743,11 @@ void set_transfer(struct ngene_channel *chan, int state)
/* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
ngreadl(0x9310)); */
+ mutex_lock(&dev->stream_mutex);
ret = ngene_command_stream_control(dev, chan->number,
control, mode, flags);
+ mutex_unlock(&dev->stream_mutex);
+
if (!ret)
chan->running = state;
else
@@ -1283,7 +1279,7 @@ static int ngene_load_firm(struct ngene *dev)
static void ngene_stop(struct ngene *dev)
{
- down(&dev->cmd_mutex);
+ mutex_destroy(&dev->cmd_mutex);
i2c_del_adapter(&(dev->channel[0].i2c_adapter));
i2c_del_adapter(&(dev->channel[1].i2c_adapter));
ngwritel(0, NGENE_INT_ENABLE);
@@ -1346,10 +1342,10 @@ static int ngene_start(struct ngene *dev)
init_waitqueue_head(&dev->cmd_wq);
init_waitqueue_head(&dev->tx_wq);
init_waitqueue_head(&dev->rx_wq);
- sema_init(&dev->cmd_mutex, 1);
- sema_init(&dev->stream_mutex, 1);
+ mutex_init(&dev->cmd_mutex);
+ mutex_init(&dev->stream_mutex);
sema_init(&dev->pll_mutex, 1);
- sema_init(&dev->i2c_switch_mutex, 1);
+ mutex_init(&dev->i2c_switch_mutex);
spin_lock_init(&dev->cmd_lock);
for (i = 0; i < MAX_STREAM; i++)
spin_lock_init(&dev->channel[i].state_lock);
@@ -1606,10 +1602,10 @@ static void ngene_unlink(struct ngene *dev)
com.in_len = 3;
com.out_len = 1;
- down(&dev->cmd_mutex);
+ mutex_lock(&dev->cmd_mutex);
ngwritel(0, NGENE_INT_ENABLE);
ngene_command_mutex(dev, &com);
- up(&dev->cmd_mutex);
+ mutex_unlock(&dev->cmd_mutex);
}
void ngene_shutdown(struct pci_dev *pdev)
diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c
index cf39fcf54adf..fbf36353c701 100644
--- a/drivers/media/pci/ngene/ngene-i2c.c
+++ b/drivers/media/pci/ngene/ngene-i2c.c
@@ -118,7 +118,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
(struct ngene_channel *)i2c_get_adapdata(adapter);
struct ngene *dev = chan->dev;
- down(&dev->i2c_switch_mutex);
+ mutex_lock(&dev->i2c_switch_mutex);
ngene_i2c_set_bus(dev, chan->number);
if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
@@ -136,11 +136,11 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
msg[0].buf, msg[0].len, 0))
goto done;
- up(&dev->i2c_switch_mutex);
+ mutex_unlock(&dev->i2c_switch_mutex);
return -EIO;
done:
- up(&dev->i2c_switch_mutex);
+ mutex_unlock(&dev->i2c_switch_mutex);
return num;
}
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 10d8f74c4f0a..7c7cd217333d 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -762,10 +762,10 @@ struct ngene {
wait_queue_head_t cmd_wq;
int cmd_done;
- struct semaphore cmd_mutex;
- struct semaphore stream_mutex;
+ struct mutex cmd_mutex;
+ struct mutex stream_mutex;
struct semaphore pll_mutex;
- struct semaphore i2c_switch_mutex;
+ struct mutex i2c_switch_mutex;
int i2c_current_channel;
int i2c_current_bus;
spinlock_t cmd_lock;
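The ngene conversion above replaces binary semaphores with mutexes, which gain lockdep checking and strict owner semantics, and moves the stream_mutex locking out to the set_transfer() caller. The semaphore-to-mutex substitution in isolation, with a hypothetical driver struct:

	#include <linux/mutex.h>

	struct mydev {				/* hypothetical */
		struct mutex cmd_mutex;		/* was: struct semaphore */
	};

	static void mydev_init(struct mydev *dev)
	{
		mutex_init(&dev->cmd_mutex);	/* was: sema_init(&sem, 1) */
	}

	static int mydev_command(struct mydev *dev)
	{
		int ret;

		mutex_lock(&dev->cmd_mutex);	/* was: down(&sem) */
		ret = 0;			/* ... submit command ... */
		mutex_unlock(&dev->cmd_mutex);	/* was: up(&sem) */
		return ret;
	}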
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 2a044be729da..e7bd2b8484e3 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -545,6 +545,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv,
switch (input->std) {
default:
WARN_ON_ONCE(1);
+ return -EINVAL;
case STD_NTSC:
f->fmt.pix.height = 480;
break;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 1313cd533436..fb1fa0b82077 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -475,8 +475,8 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
- select QCOM_MDT_LOADER if (ARM || ARM64)
- select QCOM_SCM if (ARM || ARM64)
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
select V4L2_MEM2MEM_DEV
---help---
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 25cbf9e5ac5a..bba1eb43b5d8 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -393,8 +393,8 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
int ret;
int i;
- if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
- ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) {
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264) {
width = round_up(q_data->width, 16);
height = round_up(q_data->height, 16);
} else {
@@ -2198,7 +2198,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
ctx->display_idx = display_idx;
}
-static void coda_error_decode(struct coda_ctx *ctx)
+static void coda_decode_timeout(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *dst_buf;
@@ -2223,7 +2223,7 @@ const struct coda_context_ops coda_bit_decode_ops = {
.start_streaming = coda_start_decoding,
.prepare_run = coda_prepare_decode,
.finish_run = coda_finish_decode,
- .error_run = coda_error_decode,
+ .run_timeout = coda_decode_timeout,
.seq_end_work = coda_seq_end_work,
.release = coda_bit_release,
};
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index f92cc7df58fb..829c7895a98a 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1164,8 +1164,8 @@ static void coda_pic_run_work(struct work_struct *work)
coda_hw_reset(ctx);
- if (ctx->ops->error_run)
- ctx->ops->error_run(ctx);
+ if (ctx->ops->run_timeout)
+ ctx->ops->run_timeout(ctx);
} else if (!ctx->aborting) {
ctx->ops->finish_run(ctx);
}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 40fe22f0d757..c5f504d8cf67 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -183,7 +183,7 @@ struct coda_context_ops {
int (*start_streaming)(struct coda_ctx *ctx);
int (*prepare_run)(struct coda_ctx *ctx);
void (*finish_run)(struct coda_ctx *ctx);
- void (*error_run)(struct coda_ctx *ctx);
+ void (*run_timeout)(struct coda_ctx *ctx);
void (*seq_end_work)(struct work_struct *work);
void (*release)(struct coda_ctx *ctx);
};
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index 8f6688a7a111..f1b521045d64 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -42,16 +42,6 @@ struct ccdc_hw_ops {
int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
/* get interface parameters */
int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
- /*
- * Pointer to function to set parameters. Used
- * for implementing VPFE_S_CCDC_PARAMS
- */
- int (*set_params) (void *params);
- /*
- * Pointer to function to get parameter. Used
- * for implementing VPFE_G_CCDC_PARAMS
- */
- int (*get_params) (void *params);
/* Pointer to function to configure ccdc */
int (*configure) (void);
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 73db166dc338..6d492dc4c3a9 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -17,12 +17,7 @@
* This module is for configuring DM355 CCD controller of VPFE to capture
* Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
* such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Bayer RGB data, before writing it to SDRAM. This
- * module also allows application to configure individual
- * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
- * To do so, application include dm355_ccdc.h and vpfe_capture.h header
- * files. The setparams() API is called by vpfe_capture driver
- * to configure module parameters
+ * pre-process the Bayer RGB data, before writing it to SDRAM.
*
* TODO: 1) Raw bayer parameter settings and bayer capture
* 2) Split module parameter structure to module specific ioctl structs
@@ -260,90 +255,6 @@ static void ccdc_setwin(struct v4l2_rect *image_win,
dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
}
-static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
-{
- if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT ||
- ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n");
- return -EINVAL;
- }
-
- if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 ||
- ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n");
- return -EINVAL;
- }
-
- if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 ||
- ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n");
- return -EINVAL;
- }
-
- if ((ccdcparam->med_filt_thres < 0) ||
- (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) {
- dev_dbg(ccdc_cfg.dev,
- "Invalid value of median filter threshold\n");
- return -EINVAL;
- }
-
- if (ccdcparam->data_sz < CCDC_DATA_16BITS ||
- ccdcparam->data_sz > CCDC_DATA_8BITS) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n");
- return -EINVAL;
- }
-
- if (ccdcparam->alaw.enable) {
- if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 ||
- ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n");
- return -EINVAL;
- }
- }
-
- if (ccdcparam->blk_clamp.b_clamp_enable) {
- if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS ||
- ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) {
- dev_dbg(ccdc_cfg.dev,
- "Invalid value of sample pixel\n");
- return -EINVAL;
- }
- if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES ||
- ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) {
- dev_dbg(ccdc_cfg.dev,
- "Invalid value of sample lines\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/* Parameter operations */
-static int ccdc_set_params(void __user *params)
-{
- struct ccdc_config_params_raw ccdc_raw_params;
- int x;
-
- /* only raw module parameters can be set through the IOCTL */
- if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
- return -EINVAL;
-
- x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
- if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n",
- x);
- return -EFAULT;
- }
-
- if (!validate_ccdc_param(&ccdc_raw_params)) {
- memcpy(&ccdc_cfg.bayer.config_params,
- &ccdc_raw_params,
- sizeof(ccdc_raw_params));
- return 0;
- }
- return -EINVAL;
-}
-
/* This function will configure CCDC for YCbCr video capture */
static void ccdc_config_ycbcr(void)
{
@@ -939,7 +850,6 @@ static struct ccdc_hw_device ccdc_hw_dev = {
.enable = ccdc_enable,
.enable_out_to_sdram = ccdc_enable_output_to_sdram,
.set_hw_if_params = ccdc_set_hw_if_params,
- .set_params = ccdc_set_params,
.configure = ccdc_configure,
.set_buftype = ccdc_set_buftype,
.get_buftype = ccdc_get_buftype,
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 740fbc7a8c14..3b2d8a9317b8 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -17,13 +17,9 @@
* This module is for configuring CCD controller of DM6446 VPFE to capture
* Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
* such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This
- * module also allows application to configure individual
- * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
- * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header
- * files. The setparams() API is called by vpfe_capture driver
- * to configure module parameters. This file is named DM644x so that other
- * variants such DM6443 may be supported using the same module.
+ * pre-process the Raw Bayer RGB data, before writing it to SDRAM.
+ * This file is named DM644x so that other variants such DM6443
+ * may be supported using the same module.
*
* TODO: Test Raw bayer parameter settings and bayer capture
* Split module parameter structure to module specific ioctl structs
@@ -216,96 +212,8 @@ static void ccdc_readregs(void)
dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
}
-static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
-{
- if (ccdcparam->alaw.enable) {
- u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
- u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
-
- if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) ||
- (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) ||
- (max_gamma > max_data)) {
- dev_dbg(ccdc_cfg.dev, "\nInvalid data line select");
- return -1;
- }
- }
- return 0;
-}
-
-static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
-{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int *fpc_virtaddr = NULL;
- unsigned int *fpc_physaddr = NULL;
-
- memcpy(config_params, raw_params, sizeof(*raw_params));
- /*
- * allocate memory for fault pixel table and copy the user
- * values to the table
- */
- if (!config_params->fault_pxl.enable)
- return 0;
-
- fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
- fpc_virtaddr = (unsigned int *)phys_to_virt(
- (unsigned long)fpc_physaddr);
- /*
- * Allocate memory for FPC table if current
- * FPC table buffer is not big enough to
- * accommodate FPC Number requested
- */
- if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
- if (fpc_physaddr != NULL) {
- free_pages((unsigned long)fpc_virtaddr,
- get_order
- (config_params->fault_pxl.fp_num *
- FP_NUM_BYTES));
- }
-
- /* Allocate memory for FPC table */
- fpc_virtaddr =
- (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA,
- get_order(raw_params->
- fault_pxl.fp_num *
- FP_NUM_BYTES));
-
- if (fpc_virtaddr == NULL) {
- dev_dbg(ccdc_cfg.dev,
- "\nUnable to allocate memory for FPC");
- return -EFAULT;
- }
- fpc_physaddr =
- (unsigned int *)virt_to_phys((void *)fpc_virtaddr);
- }
-
- /* Copy number of fault pixels and FPC table */
- config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num;
- if (copy_from_user(fpc_virtaddr,
- (void __user *)raw_params->fault_pxl.fpc_table_addr,
- config_params->fault_pxl.fp_num * FP_NUM_BYTES)) {
- dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
- return -EFAULT;
- }
- config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr;
- return 0;
-}
-
static int ccdc_close(struct device *dev)
{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL;
-
- fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
-
- if (fpc_physaddr != NULL) {
- fpc_virtaddr = (unsigned int *)
- phys_to_virt((unsigned long)fpc_physaddr);
- free_pages((unsigned long)fpc_virtaddr,
- get_order(config_params->fault_pxl.fp_num *
- FP_NUM_BYTES));
- }
return 0;
}
@@ -339,29 +247,6 @@ static void ccdc_sbl_reset(void)
vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
}
-/* Parameter operations */
-static int ccdc_set_params(void __user *params)
-{
- struct ccdc_config_params_raw ccdc_raw_params;
- int x;
-
- if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
- return -EINVAL;
-
- x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
- if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n",
- x);
- return -EFAULT;
- }
-
- if (!validate_ccdc_param(&ccdc_raw_params)) {
- if (!ccdc_update_raw_params(&ccdc_raw_params))
- return 0;
- }
- return -EINVAL;
-}
-
/*
* ccdc_config_ycbcr()
* This function will configure CCDC for YCbCr video capture
@@ -489,32 +374,6 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
regw(val, CCDC_BLKCMP);
}
-static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
-{
- u32 val;
-
- /* Initially disable FPC */
- val = CCDC_FPC_DISABLE;
- regw(val, CCDC_FPC);
-
- if (!fpc->enable)
- return;
-
- /* Configure Fault pixel if needed */
- regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n",
- (fpc->fpc_table_addr));
- /* Write the FPC params with FPC disable */
- val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
- regw(val, CCDC_FPC);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
- /* read the FPC register */
- val = regr(CCDC_FPC) | CCDC_FPC_ENABLE;
- regw(val, CCDC_FPC);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
-}
-
/*
* ccdc_config_raw()
* This function will configure CCDC for Raw capture mode
@@ -569,9 +428,6 @@ static void ccdc_config_raw(void)
/* Configure Black level compensation */
ccdc_config_black_compense(&config_params->blk_comp);
- /* Configure Fault Pixel Correction */
- ccdc_config_fpc(&config_params->fault_pxl);
-
/* If data size is 8 bit then pack the data */
if ((config_params->data_sz == CCDC_DATA_8BITS) ||
config_params->alaw.enable)
@@ -929,7 +785,6 @@ static struct ccdc_hw_device ccdc_hw_dev = {
.reset = ccdc_sbl_reset,
.enable = ccdc_enable,
.set_hw_if_params = ccdc_set_hw_if_params,
- .set_params = ccdc_set_params,
.configure = ccdc_configure,
.set_buftype = ccdc_set_buftype,
.get_buftype = ccdc_get_buftype,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index e3fe3e0635aa..b1bf4a7e8eb7 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -281,45 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
/*
- * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
- */
-static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev,
- struct v4l2_format *f)
-{
- struct v4l2_rect image_win;
- enum ccdc_buftype buf_type;
- enum ccdc_frmfmt frm_fmt;
-
- memset(f, 0, sizeof(*f));
- f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ccdc_dev->hw_ops.get_image_window(&image_win);
- f->fmt.pix.width = image_win.width;
- f->fmt.pix.height = image_win.height;
- f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length();
- f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
- f->fmt.pix.height;
- buf_type = ccdc_dev->hw_ops.get_buftype();
- f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format();
- frm_fmt = ccdc_dev->hw_ops.get_frame_format();
- if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
- f->fmt.pix.field = V4L2_FIELD_NONE;
- else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED)
- f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
- else {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n");
- return -EINVAL;
- }
- } else {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n");
- return -EINVAL;
- }
- return 0;
-}
-
-/*
* vpfe_config_ccdc_image_format()
* For a pix format, configure ccdc to setup the capture
*/
@@ -1697,59 +1658,6 @@ unlock_out:
return ret;
}
-
-static long vpfe_param_handler(struct file *file, void *priv,
- bool valid_prio, unsigned int cmd, void *param)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- int ret;
-
- v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
-
- if (vpfe_dev->started) {
- /* only allowed if streaming is not started */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "device already started\n");
- return -EBUSY;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- switch (cmd) {
- case VPFE_CMD_S_CCDC_RAW_PARAMS:
- v4l2_warn(&vpfe_dev->v4l2_dev,
- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
- if (ccdc_dev->hw_ops.set_params) {
- ret = ccdc_dev->hw_ops.set_params(param);
- if (ret) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "Error setting parameters in CCDC\n");
- goto unlock_out;
- }
- ret = vpfe_get_ccdc_image_format(vpfe_dev,
- &vpfe_dev->fmt);
- if (ret < 0) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "Invalid image format at CCDC\n");
- goto unlock_out;
- }
- } else {
- ret = -EINVAL;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
- }
- break;
- default:
- ret = -ENOTTY;
- }
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-
/* vpfe capture ioctl operations */
static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_querycap = vpfe_querycap,
@@ -1772,7 +1680,6 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_cropcap = vpfe_cropcap,
.vidioc_g_selection = vpfe_g_selection,
.vidioc_s_selection = vpfe_s_selection,
- .vidioc_default = vpfe_param_handler,
};
static struct vpfe_device *vpfe_initialize(void)
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index d78580f9e431..4be6554c56c5 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1719,7 +1719,6 @@ vpif_unregister:
*/
static int vpif_remove(struct platform_device *device)
{
- struct common_obj *common;
struct channel_obj *ch;
int i;
@@ -1730,7 +1729,6 @@ static int vpif_remove(struct platform_device *device)
for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
/* Get the pointer to the channel object */
ch = vpif_obj.dev[i];
- common = &ch->common[VPIF_VIDEO_INDEX];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index b5ac6ce626b3..bf982bf86542 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1339,7 +1339,6 @@ vpif_unregister:
*/
static int vpif_remove(struct platform_device *device)
{
- struct common_obj *common;
struct channel_obj *ch;
int i;
@@ -1350,7 +1349,6 @@ static int vpif_remove(struct platform_device *device)
for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
/* Get the pointer to the channel object */
ch = vpif_obj.dev[i];
- common = &ch->common[VPIF_VIDEO_INDEX];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 92c4e1826356..45a553d4f5b2 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -16,7 +16,6 @@
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
-#include <linux/omap-dma.h>
#include <video/omapvrfb.h>
#include "omap_voutdef.h"
@@ -63,7 +62,7 @@ static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
/*
* Wakes up the application once the DMA transfer to VRFB space is completed.
*/
-static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+static void omap_vout_vrfb_dma_tx_callback(void *data)
{
struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
@@ -94,6 +93,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
int ret = 0, i, j;
struct omap_vout_device *vout;
struct video_device *vfd;
+ dma_cap_mask_t mask;
int image_width, image_height;
int vrfb_num_bufs = VRFB_NUM_BUFS;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
@@ -131,18 +131,27 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
/*
* Request and Initialize DMA, for DMA based VRFB transfer
*/
- vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
- vout->vrfb_dma_tx.dma_ch = -1;
- vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
- ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
- omap_vout_vrfb_dma_tx_callback,
- (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
- if (ret < 0) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_INTERLEAVE, mask);
+ vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(vout->vrfb_dma_tx.chan)) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ } else {
+ size_t xt_size = sizeof(struct dma_interleaved_template) +
+ sizeof(struct data_chunk);
+
+ vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL);
+ if (!vout->vrfb_dma_tx.xt) {
+ dma_release_channel(vout->vrfb_dma_tx.chan);
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ }
+ }
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED)
dev_info(&pdev->dev,
": failed to allocate DMA Channel for video%d\n",
vfd->minor);
- }
+
init_waitqueue_head(&vout->vrfb_dma_tx.wait);
/* statically allocated the VRFB buffer is done through
@@ -177,7 +186,9 @@ void omap_vout_release_vrfb(struct omap_vout_device *vout)
if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ kfree(vout->vrfb_dma_tx.xt);
+ dmaengine_terminate_sync(vout->vrfb_dma_tx.chan);
+ dma_release_channel(vout->vrfb_dma_tx.chan);
}
}
@@ -219,70 +230,84 @@ int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
}
int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
- struct videobuf_buffer *vb)
+ struct videobuf_buffer *vb)
{
- dma_addr_t dmabuf;
- struct vid_vrfb_dma *tx;
+ struct dma_async_tx_descriptor *tx;
+ /* flags must be initialized: they are passed to the prep call below */
+ enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_chan *chan = vout->vrfb_dma_tx.chan;
+ struct dma_device *dmadev = chan->device;
+ struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt;
+ dma_cookie_t cookie;
+ enum dma_status status;
enum dss_rotation rotation;
- u32 dest_frame_index = 0, src_element_index = 0;
- u32 dest_element_index = 0, src_frame_index = 0;
- u32 elem_count = 0, frame_count = 0, pixsize = 2;
+ size_t dst_icg;
+ u32 pixsize;
if (!is_rotation_enabled(vout))
return 0;
- dmabuf = vout->buf_phy_addr[vb->i];
/* If rotation is enabled, copy input buffer into VRFB
* memory space using DMA. We are copying input buffer
* into VRFB memory space of desired angle and DSS will
* read image VRFB memory for 0 degree angle
*/
+
pixsize = vout->bpp * vout->vrfb_bpp;
- /*
- * DMA transfer in double index mode
- */
+ /* icg is the plain destination line gap; the old double-index frame
+ * offset was gap + 1, which must not be carried over here */
+ dst_icg = (MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp);
+
+ xt->src_start = vout->buf_phy_addr[vb->i];
+ xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+
+ xt->numf = vout->pix.height;
+ xt->frame_size = 1;
+ xt->sgl[0].size = vout->pix.width * vout->bpp;
+ xt->sgl[0].icg = dst_icg;
+
+ xt->dir = DMA_MEM_TO_MEM;
+ xt->src_sgl = false;
+ xt->src_inc = true;
+ xt->dst_sgl = true;
+ xt->dst_inc = true;
+
+ tx = dmadev->device_prep_interleaved_dma(chan, xt, flags);
+ if (!tx) {
+ pr_err("%s: DMA interleaved prep error\n", __func__);
+ return -EINVAL;
+ }
- /* Frame index */
- dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
-
- /* Source and destination parameters */
- src_element_index = 0;
- src_frame_index = 0;
- dest_element_index = 1;
- /* Number of elements per frame */
- elem_count = vout->pix.width * vout->bpp;
- frame_count = vout->pix.height;
- tx = &vout->vrfb_dma_tx;
- tx->tx_status = 0;
- omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
- tx->dev_id, 0x0);
- /* src_port required only for OMAP1 */
- omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dmabuf, src_element_index, src_frame_index);
- /*set dma source burst mode for VRFB */
- omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- rotation = calc_rotation(vout);
+ tx->callback = omap_vout_vrfb_dma_tx_callback;
+ tx->callback_param = &vout->vrfb_dma_tx;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie);
+ return -EINVAL;
+ }
- /* dest_port required only for OMAP1 */
- omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
- vout->vrfb_context[vb->i].paddr[0], dest_element_index,
- dest_frame_index);
- /*set dma dest burst mode for VRFB */
- omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+ vout->vrfb_dma_tx.tx_status = 0;
+ dma_async_issue_pending(chan);
- omap_start_dma(tx->dma_ch);
- wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1,
+ wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait,
+ vout->vrfb_dma_tx.tx_status == 1,
VRFB_TX_TIMEOUT);
- if (tx->tx_status == 0) {
- omap_stop_dma(tx->dma_ch);
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+
+ if (vout->vrfb_dma_tx.tx_status == 0) {
+ pr_err("%s: Timeout while waiting for DMA\n", __func__);
+ dmaengine_terminate_sync(chan);
+ return -EINVAL;
+ } else if (status != DMA_COMPLETE) {
+ pr_err("%s: DMA completion %s status\n", __func__,
+ status == DMA_ERROR ? "error" : "busy");
+ dmaengine_terminate_sync(chan);
return -EINVAL;
}
+
/* Store buffers physical address into an array. Addresses
* from this array will be used to configure DSS */
+ rotation = calc_rotation(vout);
vout->queued_buf_addr[vb->i] = (u8 *)
vout->vrfb_context[vb->i].paddr[rotation];
return 0;
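The converted prepare path above expresses the old double-index DMA as a dma_interleaved_template with a destination line gap. The same 2D-copy pattern as a self-contained sketch (function name and parameters hypothetical; the driver's own flow additionally uses a completion callback and wait queue rather than a synchronous wait):

	/* assumes #include <linux/dmaengine.h> and <linux/slab.h> */
	static int vrfb_2d_copy(struct dma_chan *chan, dma_addr_t src,
				dma_addr_t dst, size_t line_len,
				size_t stride, unsigned int lines)
	{
		struct dma_interleaved_template *xt;
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;
		int ret = 0;

		xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
		if (!xt)
			return -ENOMEM;

		xt->src_start = src;
		xt->dst_start = dst;
		xt->numf = lines;		/* one frame per line */
		xt->frame_size = 1;		/* one chunk per frame */
		xt->sgl[0].size = line_len;	/* bytes copied per line */
		xt->sgl[0].icg = stride - line_len; /* skip to next dst line */
		xt->dir = DMA_MEM_TO_MEM;
		xt->src_inc = true;
		xt->dst_inc = true;
		xt->src_sgl = false;		/* source is contiguous */
		xt->dst_sgl = true;		/* icg applies to the destination */

		tx = dmaengine_prep_interleaved_dma(chan, xt,
						DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx) {
			ret = -EINVAL;
			goto out;
		}

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie)) {
			ret = -EINVAL;
			goto out;
		}

		dma_async_issue_pending(chan);
		if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
			ret = -EIO;
	out:
		kfree(xt);
		return ret;
	}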
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 80c79fabdf95..56b630b1c8b4 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -14,6 +14,7 @@
#include <media/v4l2-ctrls.h>
#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
+#include <linux/dmaengine.h>
#define YUYV_BPP 2
#define RGB565_BPP 2
@@ -81,8 +82,9 @@ enum vout_rotaion_type {
* for VRFB hidden buffer
*/
struct vid_vrfb_dma {
- int dev_id;
- int dma_ch;
+ struct dma_chan *chan;
+ struct dma_interleaved_template *xt;
+
int req_status;
int tx_status;
wait_queue_head_t wait;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 776d2bae6979..41eef376eb2d 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -76,7 +76,7 @@ static void venus_sys_error_handler(struct work_struct *work)
hfi_core_deinit(core, true);
hfi_destroy(core);
mutex_lock(&core->lock);
- venus_shutdown(&core->dev_fw);
+ venus_shutdown(core->dev);
pm_runtime_put_sync(core->dev);
@@ -84,7 +84,7 @@ static void venus_sys_error_handler(struct work_struct *work)
pm_runtime_get_sync(core->dev);
- ret |= venus_boot(core->dev, &core->dev_fw, core->res->fwname);
+ ret |= venus_boot(core->dev, core->res->fwname);
ret |= hfi_core_resume(core, true);
@@ -137,7 +137,7 @@ static int venus_clks_enable(struct venus_core *core)
return 0;
err:
- while (--i)
+ while (i--)
clk_disable_unprepare(core->clks[i]);
return ret;
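The `while (--i)` to `while (i--)` fix above matters in two ways: the pre-decrement form never disables clks[0], and when the very first clock fails (i == 0) it decrements past zero and walks off the start of the array. The corrected unwind idiom as a generic sketch:

	/* assumes #include <linux/clk.h>; enable clks[0..n-1], unwinding on error */
	static int enable_clks(struct clk **clks, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto err;
		}
		return 0;
	err:
		while (i--)	/* disables clks[i-1]..clks[0], stops at zero */
			clk_disable_unprepare(clks[i]);
		return ret;
	}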
@@ -207,7 +207,7 @@ static int venus_probe(struct platform_device *pdev)
if (ret < 0)
goto err_runtime_disable;
- ret = venus_boot(dev, &core->dev_fw, core->res->fwname);
+ ret = venus_boot(dev, core->res->fwname);
if (ret)
goto err_runtime_disable;
@@ -238,7 +238,7 @@ err_dev_unregister:
err_core_deinit:
hfi_core_deinit(core, false);
err_venus_shutdown:
- venus_shutdown(&core->dev_fw);
+ venus_shutdown(dev);
err_runtime_disable:
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
@@ -259,7 +259,7 @@ static int venus_remove(struct platform_device *pdev)
WARN_ON(ret);
hfi_destroy(core);
- venus_shutdown(&core->dev_fw);
+ venus_shutdown(dev);
of_platform_depopulate(dev);
pm_runtime_put_sync(dev);
@@ -270,8 +270,7 @@ static int venus_remove(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_PM
-static int venus_runtime_suspend(struct device *dev)
+static __maybe_unused int venus_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
int ret;
@@ -283,7 +282,7 @@ static int venus_runtime_suspend(struct device *dev)
return ret;
}
-static int venus_runtime_resume(struct device *dev)
+static __maybe_unused int venus_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
int ret;
@@ -302,7 +301,6 @@ err_clks_disable:
venus_clks_disable(core);
return ret;
}
-#endif
static const struct dev_pm_ops venus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index e542700eee32..cba092bcb76d 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -101,7 +101,6 @@ struct venus_core {
struct device *dev;
struct device *dev_dec;
struct device *dev_enc;
- struct device dev_fw;
struct mutex lock;
struct list_head instances;
atomic_t insts_count;
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 1b1a4f355918..521d4b36c090 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -12,97 +12,87 @@
*
*/
-#include <linux/dma-mapping.h>
+#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
+#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_reserved_mem.h>
-#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "firmware.h"
#define VENUS_PAS_ID 9
-#define VENUS_FW_MEM_SIZE SZ_8M
+#define VENUS_FW_MEM_SIZE (6 * SZ_1M)
-static void device_release_dummy(struct device *dev)
-{
- of_reserved_mem_device_release(dev);
-}
-
-int venus_boot(struct device *parent, struct device *fw_dev, const char *fwname)
+int venus_boot(struct device *dev, const char *fwname)
{
const struct firmware *mdt;
+ struct device_node *node;
phys_addr_t mem_phys;
+ struct resource r;
ssize_t fw_size;
size_t mem_size;
void *mem_va;
int ret;
- if (!qcom_scm_is_available())
+ if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || !qcom_scm_is_available())
return -EPROBE_DEFER;
- fw_dev->parent = parent;
- fw_dev->release = device_release_dummy;
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
- ret = dev_set_name(fw_dev, "%s:%s", dev_name(parent), "firmware");
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node); /* balance of_parse_phandle() */
if (ret)
return ret;
- ret = device_register(fw_dev);
- if (ret < 0)
- return ret;
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
- ret = of_reserved_mem_device_init_by_idx(fw_dev, parent->of_node, 0);
- if (ret)
- goto err_unreg_device;
+ if (mem_size < VENUS_FW_MEM_SIZE)
+ return -EINVAL;
- mem_size = VENUS_FW_MEM_SIZE;
-
- mem_va = dmam_alloc_coherent(fw_dev, mem_size, &mem_phys, GFP_KERNEL);
+ mem_va = memremap(r.start, mem_size, MEMREMAP_WC);
if (!mem_va) {
- ret = -ENOMEM;
- goto err_unreg_device;
+ dev_err(dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, mem_size);
+ return -ENOMEM;
}
- ret = request_firmware(&mdt, fwname, fw_dev);
+ ret = request_firmware(&mdt, fwname, dev);
if (ret < 0)
- goto err_unreg_device;
+ goto err_unmap;
fw_size = qcom_mdt_get_size(mdt);
if (fw_size < 0) {
ret = fw_size;
release_firmware(mdt);
- goto err_unreg_device;
+ goto err_unmap;
}
- ret = qcom_mdt_load(fw_dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys,
+ ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys,
mem_size);
release_firmware(mdt);
if (ret)
- goto err_unreg_device;
+ goto err_unmap;
ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
if (ret)
- goto err_unreg_device;
-
- return 0;
+ goto err_unmap;
-err_unreg_device:
- device_unregister(fw_dev);
+err_unmap:
+ memunmap(mem_va);
return ret;
}
-int venus_shutdown(struct device *fw_dev)
+int venus_shutdown(struct device *dev)
{
- int ret;
-
- ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
- device_unregister(fw_dev);
- memset(fw_dev, 0, sizeof(*fw_dev));
-
- return ret;
+ return qcom_scm_pas_shutdown(VENUS_PAS_ID);
}
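venus_boot() now resolves the firmware carveout straight from the DT memory-region phandle and maps it write-combined, instead of registering a dummy child device against reserved memory. The lookup-and-map pattern in isolation, as a hedged sketch (helper name hypothetical):

	/* assumes #include <linux/err.h>, <linux/io.h>, <linux/of.h>,
	 * <linux/of_address.h> */
	static void *map_memory_region(struct device *dev, struct resource *r)
	{
		struct device_node *node;
		void *va;
		int ret;

		node = of_parse_phandle(dev->of_node, "memory-region", 0);
		if (!node)
			return ERR_PTR(-EINVAL);

		ret = of_address_to_resource(node, 0, r);
		of_node_put(node);	/* drop the phandle reference */
		if (ret)
			return ERR_PTR(ret);

		va = memremap(r->start, resource_size(r), MEMREMAP_WC);
		return va ? va : ERR_PTR(-ENOMEM);
	}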
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
index f81a98979798..428efb56d339 100644
--- a/drivers/media/platform/qcom/venus/firmware.h
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -16,8 +16,7 @@
struct device;
-int venus_boot(struct device *parent, struct device *fw_dev,
- const char *fwname);
-int venus_shutdown(struct device *fw_dev);
+int venus_boot(struct device *dev, const char *fwname);
+int venus_shutdown(struct device *dev);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index f8841713e417..a681ae5381d6 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -239,11 +239,12 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
break;
}
- if (!error) {
- rem_bytes -= read_bytes;
- data += read_bytes;
- num_properties--;
- }
+ if (error)
+ break;
+
+ rem_bytes -= read_bytes;
+ data += read_bytes;
+ num_properties--;
}
err_no_prop:
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index 7af66860d624..2cc289e4dea1 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -104,7 +104,7 @@ static void bdisp_dbg_dump_ins(struct seq_file *s, u32 val)
if (val & BLT_INS_IRQ)
seq_puts(s, "IRQ - ");
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
@@ -153,7 +153,7 @@ static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
if (val & BLT_TTY_BIG_END)
seq_puts(s, "BigEndian - ");
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name)
@@ -230,7 +230,7 @@ static void bdisp_dbg_dump_sty(struct seq_file *s,
seq_puts(s, "BigEndian - ");
done:
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val)
@@ -247,7 +247,7 @@ static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val)
else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE)
seq_puts(s, "Sample Chroma");
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name)
@@ -266,7 +266,7 @@ static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name)
seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc);
done:
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name)
@@ -281,7 +281,7 @@ static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name)
seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7);
done:
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static void bdisp_dbg_dump_ivmx(struct seq_file *s,
@@ -293,7 +293,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s,
seq_printf(s, "IVMX3\t0x%08X\t", c3);
if (!c0 && !c1 && !c2 && !c3) {
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
return;
}
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 14cb32e21130..88a1e5670c72 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -517,21 +517,22 @@ static int vimc_cap_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id vimc_cap_driver_ids[] = {
+ {
+ .name = VIMC_CAP_DRV_NAME,
+ },
+ { }
+};
+
static struct platform_driver vimc_cap_pdrv = {
.probe = vimc_cap_probe,
.remove = vimc_cap_remove,
+ .id_table = vimc_cap_driver_ids,
.driver = {
.name = VIMC_CAP_DRV_NAME,
},
};
-static const struct platform_device_id vimc_cap_driver_ids[] = {
- {
- .name = VIMC_CAP_DRV_NAME,
- },
- { }
-};
-
module_platform_driver(vimc_cap_pdrv);
MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 35b15bd4d61d..033a131f67af 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -577,21 +577,22 @@ static int vimc_deb_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id vimc_deb_driver_ids[] = {
+ {
+ .name = VIMC_DEB_DRV_NAME,
+ },
+ { }
+};
+
static struct platform_driver vimc_deb_pdrv = {
.probe = vimc_deb_probe,
.remove = vimc_deb_remove,
+ .id_table = vimc_deb_driver_ids,
.driver = {
.name = VIMC_DEB_DRV_NAME,
},
};
-static const struct platform_device_id vimc_deb_driver_ids[] = {
- {
- .name = VIMC_DEB_DRV_NAME,
- },
- { }
-};
-
module_platform_driver(vimc_deb_pdrv);
MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index fe77505d2679..0a3e086e12f3 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -431,21 +431,22 @@ static int vimc_sca_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id vimc_sca_driver_ids[] = {
+ {
+ .name = VIMC_SCA_DRV_NAME,
+ },
+ { }
+};
+
static struct platform_driver vimc_sca_pdrv = {
.probe = vimc_sca_probe,
.remove = vimc_sca_remove,
+ .id_table = vimc_sca_driver_ids,
.driver = {
.name = VIMC_SCA_DRV_NAME,
},
};
-static const struct platform_device_id vimc_sca_driver_ids[] = {
- {
- .name = VIMC_SCA_DRV_NAME,
- },
- { }
-};
-
module_platform_driver(vimc_sca_pdrv);
MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index ebdbbe8c05ed..615c2b18dcfc 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -365,21 +365,22 @@ static int vimc_sen_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id vimc_sen_driver_ids[] = {
+ {
+ .name = VIMC_SEN_DRV_NAME,
+ },
+ { }
+};
+
static struct platform_driver vimc_sen_pdrv = {
.probe = vimc_sen_probe,
.remove = vimc_sen_remove,
+ .id_table = vimc_sen_driver_ids,
.driver = {
.name = VIMC_SEN_DRV_NAME,
},
};
-static const struct platform_device_id vimc_sen_driver_ids[] = {
- {
- .name = VIMC_SEN_DRV_NAME,
- },
- { }
-};
-
module_platform_driver(vimc_sen_pdrv);
MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
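The four vimc hunks above all make the same change: the platform_device_id table moves above the platform_driver so the new .id_table field can reference it at initializer time, and MODULE_DEVICE_TABLE keeps module autoloading working. The resulting shape as a minimal sketch (names hypothetical):

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev) { return 0; }
	static int foo_remove(struct platform_device *pdev) { return 0; }

	static const struct platform_device_id foo_ids[] = {
		{ .name = "foo" },
		{ }	/* sentinel */
	};
	MODULE_DEVICE_TABLE(platform, foo_ids);

	static struct platform_driver foo_pdrv = {
		.probe = foo_probe,
		.remove = foo_remove,
		.id_table = foo_ids,	/* must already be declared */
		.driver = {
			.name = "foo",
		},
	};
	module_platform_driver(foo_pdrv);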
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 7240223dc15a..17e82a9a0109 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -610,10 +610,21 @@ static int wl1273_fm_start(struct wl1273_device *radio, int new_mode)
}
}
- if (radio->rds_on)
+ if (radio->rds_on) {
r = core->write(core, WL1273_RDS_DATA_ENB, 1);
- else
+ if (r) {
+ dev_err(dev, "%s: RDS_DATA_ENB ON fails\n",
+ __func__);
+ goto fail;
+ }
+ } else {
r = core->write(core, WL1273_RDS_DATA_ENB, 0);
+ if (r) {
+ dev_err(dev, "%s: RDS_DATA_ENB OFF fails\n",
+ __func__);
+ goto fail;
+ }
+ }
} else {
dev_warn(dev, "%s: Illegal mode.\n", __func__);
}
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index a30af91710fe..d2223c04e9ad 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (!dev->rx_resolution)
return -ENOTTY;
- val = dev->rx_resolution;
+ val = dev->rx_resolution / 1000;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 192b1c7740df..145407dee3db 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -342,6 +342,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
switch (vco_sel) {
default:
WARN_ON(1);
+ return -EINVAL;
case 0:
if (vco_cal < 8) {
regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 353744fee053..dd59c2c0e4a5 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -2737,8 +2737,6 @@ static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq)
status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0);
status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7);
divider_val = 2 ;
- Fmax = FmaxBin ;
- Fmin = FminBin ;
}
/* TG_DIV_VAL */
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 9ec919c68482..9d82ec0a4b64 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -351,7 +351,7 @@ int au0828_rc_register(struct au0828_dev *dev)
if (err)
goto error;
- pr_info("Remote controller %s initalized\n", ir->name);
+ pr_info("Remote controller %s initialized\n", ir->name);
return 0;
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 594360a63c18..a91fdad8f8d4 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -207,15 +207,13 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
struct lme2510_state *st = d->priv;
u8 all_pids[] = LME_ALL_PIDS;
u8 stream_on[] = LME_ST_ON_W;
- int ret;
u8 rbuff[1];
if (st->pid_off)
- ret = lme2510_usb_talk(d, all_pids, sizeof(all_pids),
- rbuff, sizeof(rbuff));
+ lme2510_usb_talk(d, all_pids, sizeof(all_pids),
+ rbuff, sizeof(rbuff));
/*Restart Stream Command*/
- ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on),
- rbuff, sizeof(rbuff));
- return ret;
+ return lme2510_usb_talk(d, stream_on, sizeof(stream_on),
+ rbuff, sizeof(rbuff));
}
static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 08acdd32e412..bea1b4764a66 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -215,13 +215,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
deb_info("i2c read error (status = %d)\n", result);
- break;
+ goto unlock;
}
if (msg[i].len > sizeof(st->buf)) {
deb_info("buffer too small to fit %d bytes\n",
msg[i].len);
- return -EIO;
+ result = -EIO;
+ goto unlock;
}
memcpy(msg[i].buf, st->buf, msg[i].len);
@@ -233,8 +234,8 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* Write request */
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- mutex_unlock(&d->i2c_mutex);
- return -EINTR;
+ result = -EINTR;
+ goto unlock;
}
st->buf[0] = REQUEST_NEW_I2C_WRITE;
st->buf[1] = msg[i].addr << 1;
@@ -247,7 +248,9 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
if (msg[i].len > sizeof(st->buf) - 4) {
deb_info("i2c message to big: %d\n",
msg[i].len);
- return -EIO;
+ mutex_unlock(&d->usb_mutex);
+ result = -EIO;
+ goto unlock;
}
/* The Actual i2c payload */
@@ -269,8 +272,11 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
}
}
}
+ result = i;
+
+unlock:
mutex_unlock(&d->i2c_mutex);
- return i;
+ return result;
}
/*
@@ -281,7 +287,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct dib0700_state *st = d->priv;
- int i,len;
+ int i, len, result;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EINTR;
@@ -298,7 +304,8 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
if (msg[i].len > sizeof(st->buf) - 2) {
deb_info("i2c xfer to big: %d\n",
msg[i].len);
- return -EIO;
+ result = -EIO;
+ goto unlock;
}
memcpy(&st->buf[2], msg[i].buf, msg[i].len);
@@ -313,13 +320,15 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
if (len <= 0) {
deb_info("I2C read failed on address 0x%02x\n",
msg[i].addr);
- break;
+ result = -EIO;
+ goto unlock;
}
if (msg[i + 1].len > sizeof(st->buf)) {
deb_info("i2c xfer buffer to small for %d\n",
msg[i].len);
- return -EIO;
+ result = -EIO;
+ goto unlock;
}
memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
@@ -328,14 +337,17 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
i++;
} else {
st->buf[0] = REQUEST_I2C_WRITE;
- if (dib0700_ctrl_wr(d, st->buf, msg[i].len + 2) < 0)
- break;
+ result = dib0700_ctrl_wr(d, st->buf, msg[i].len + 2);
+ if (result < 0)
+ goto unlock;
}
}
+ result = i;
+unlock:
mutex_unlock(&d->usb_mutex);
mutex_unlock(&d->i2c_mutex);
- return i;
+ return result;
}
static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
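
The rework above converts both transfer paths to a single-exit shape so that no early return can leak i2c_mutex or usb_mutex. The general idiom, with hypothetical names:

	static int xfer_one(struct state *st)
	{
		int result;

		if (mutex_lock_interruptible(&st->lock))
			return -EINTR;		/* nothing held yet */

		if (precondition_fails(st)) {
			result = -EIO;
			goto unlock;		/* never return with the lock held */
		}
		result = do_transfer(st);
	unlock:
		mutex_unlock(&st->lock);
		return result;
	}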
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 146341aeb782..4c57fd7929cb 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1193,6 +1193,22 @@ struct em28xx_board em28xx_boards[] = {
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ [EM2884_BOARD_TERRATEC_H6] = {
+ .name = "Terratec Cinergy H6 rev. 2",
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+#if 0
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x41,
+ .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
+ .tuner_gpio = terratec_h5_gpio,
+#else
+ .tuner_type = TUNER_ABSENT,
+#endif
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
[EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = {
.name = "Hauppauge WinTV HVR 930C",
.has_dvb = 1,
@@ -2496,6 +2512,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10b2), /* H6 */
+ .driver_info = EM2884_BOARD_TERRATEC_H6 },
{ USB_DEVICE(0x0ccd, 0x0084),
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 82edd37f0d73..4a7db623fe29 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -1522,6 +1522,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
break;
case EM2884_BOARD_ELGATO_EYETV_HYBRID_2008:
case EM2884_BOARD_CINERGY_HTC_STICK:
+ case EM2884_BOARD_TERRATEC_H6:
terratec_htc_stick_init(dev);
/* attach demodulator */
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 8c472d5adb50..60b195c157b8 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -982,8 +982,6 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
dev_err(&dev->intf->dev,
"%s: em28xx_i2_eeprom failed! retval [%d]\n",
__func__, retval);
-
- return retval;
}
}
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index eba75736e654..ca9673917ad5 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -821,7 +821,7 @@ static int em28xx_ir_init(struct em28xx *dev)
if (err)
goto error;
- dev_info(&dev->intf->dev, "Input extension successfully initalized\n");
+ dev_info(&dev->intf->dev, "Input extension successfully initialized\n");
return 0;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index e8d97d5ec161..88084f24f033 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -148,6 +148,7 @@
#define EM28178_BOARD_PLEX_PX_BCUD 98
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
+#define EM2884_BOARD_TERRATEC_H6 101
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index c843070f24c1..f9ed9c950247 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
MODULE_LICENSE("GPL");
static int debug;
-static int persistent_config = 1;
+static int persistent_config;
module_param(debug, int, 0644);
module_param(persistent_config, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-1)");
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index f203699e9c1b..65692576690f 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -116,21 +116,19 @@ static void rain_irq_work_handler(struct work_struct *work)
while (true) {
unsigned long flags;
- bool exit_loop = false;
char data;
spin_lock_irqsave(&rain->buf_lock, flags);
- if (rain->buf_len) {
- data = rain->buf[rain->buf_rd_idx];
- rain->buf_len--;
- rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
- } else {
- exit_loop = true;
+ if (!rain->buf_len) {
+ spin_unlock_irqrestore(&rain->buf_lock, flags);
+ break;
}
- spin_unlock_irqrestore(&rain->buf_lock, flags);
- if (exit_loop)
- break;
+ data = rain->buf[rain->buf_rd_idx];
+ rain->buf_len--;
+ rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
+
+ spin_unlock_irqrestore(&rain->buf_lock, flags);
if (!rain->cmd_started && data != '?')
continue;
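
The restructured loop pops a byte and drops the spinlock in a single pass instead of carrying an exit_loop flag across the unlock. The same shape as a standalone helper (hypothetical, untested):

	static bool rain_pop(struct rain *rain, char *out)
	{
		unsigned long flags;
		bool nonempty;

		spin_lock_irqsave(&rain->buf_lock, flags);
		nonempty = rain->buf_len > 0;
		if (nonempty) {
			*out = rain->buf[rain->buf_rd_idx];
			rain->buf_len--;
			rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
		}
		spin_unlock_irqrestore(&rain->buf_lock, flags);
		return nonempty;
	}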
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index 985af9933c7e..c1d4505f84ea 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -41,6 +41,8 @@
/* It seems the i2c bus is controlled with these registers */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "stk-webcam.h"
#define STK_IIC_BASE (0x0200)
@@ -239,8 +241,8 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
} while (tmpval == 0 && i < MAX_RETRIES);
if (tmpval != STK_IIC_STAT_TX_OK) {
if (tmpval)
- STK_ERROR("stk_sensor_outb failed, status=0x%02x\n",
- tmpval);
+ pr_err("stk_sensor_outb failed, status=0x%02x\n",
+ tmpval);
return 1;
} else
return 0;
@@ -262,8 +264,8 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
} while (tmpval == 0 && i < MAX_RETRIES);
if (tmpval != STK_IIC_STAT_RX_OK) {
if (tmpval)
- STK_ERROR("stk_sensor_inb failed, status=0x%02x\n",
- tmpval);
+ pr_err("stk_sensor_inb failed, status=0x%02x\n",
+ tmpval);
return 1;
}
@@ -366,29 +368,29 @@ int stk_sensor_init(struct stk_camera *dev)
if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES)
|| stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS)
|| stk_sensor_outb(dev, REG_COM7, COM7_RESET)) {
- STK_ERROR("Sensor resetting failed\n");
+ pr_err("Sensor resetting failed\n");
return -ENODEV;
}
msleep(10);
/* Read the manufacturer ID: ov = 0x7FA2 */
if (stk_sensor_inb(dev, REG_MIDH, &idh)
|| stk_sensor_inb(dev, REG_MIDL, &idl)) {
- STK_ERROR("Strange error reading sensor ID\n");
+ pr_err("Strange error reading sensor ID\n");
return -ENODEV;
}
if (idh != 0x7f || idl != 0xa2) {
- STK_ERROR("Huh? you don't have a sensor from ovt\n");
+ pr_err("Huh? you don't have a sensor from ovt\n");
return -ENODEV;
}
if (stk_sensor_inb(dev, REG_PID, &idh)
|| stk_sensor_inb(dev, REG_VER, &idl)) {
- STK_ERROR("Could not read sensor model\n");
+ pr_err("Could not read sensor model\n");
return -ENODEV;
}
stk_sensor_write_regvals(dev, ov_initvals);
msleep(10);
- STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n",
- idh, idl, SENSOR_ADDRESS);
+ pr_info("OmniVision sensor detected, id %02X%02X at address %x\n",
+ idh, idl, SENSOR_ADDRESS);
return 0;
}
@@ -520,7 +522,8 @@ int stk_sensor_configure(struct stk_camera *dev)
case MODE_SXGA: com7 = COM7_FMT_SXGA;
dummylines = 0;
break;
- default: STK_ERROR("Unsupported mode %d\n", dev->vsettings.mode);
+ default:
+ pr_err("Unsupported mode %d\n", dev->vsettings.mode);
return -EFAULT;
}
switch (dev->vsettings.palette) {
@@ -544,7 +547,8 @@ int stk_sensor_configure(struct stk_camera *dev)
com7 |= COM7_PBAYER;
rv = ov_fmt_bayer;
break;
- default: STK_ERROR("Unsupported colorspace\n");
+ default:
+ pr_err("Unsupported colorspace\n");
return -EFAULT;
}
/*FIXME sometimes the sensor go to a bad state
@@ -564,7 +568,7 @@ int stk_sensor_configure(struct stk_camera *dev)
switch (dev->vsettings.mode) {
case MODE_VGA:
if (stk_sensor_set_hw(dev, 302, 1582, 6, 486))
- STK_ERROR("stk_sensor_set_hw failed (VGA)\n");
+ pr_err("stk_sensor_set_hw failed (VGA)\n");
break;
case MODE_SXGA:
case MODE_CIF:
@@ -572,7 +576,7 @@ int stk_sensor_configure(struct stk_camera *dev)
case MODE_QCIF:
/*FIXME These settings seem ignored by the sensor
if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034))
- STK_ERROR("stk_sensor_set_hw failed (SXGA)\n");
+ pr_err("stk_sensor_set_hw failed (SXGA)\n");
*/
break;
}
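
The STK_ERROR/STK_INFO macros are replaced by the standard pr_* helpers; the "stkwebcam: " prefix is reattached through pr_fmt(), which must be defined before the first include that pulls in printk.h. An illustrative sketch:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#include <linux/printk.h>

	static void demo(void)
	{
		/* KBUILD_MODNAME is supplied by kbuild, "stkwebcam" here,
		 * so this emits "stkwebcam: sensor reset failed" at KERN_ERR.
		 */
		pr_err("sensor reset failed\n");
	}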
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 6e7fc36b658f..90d4a08cda31 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -18,6 +18,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -175,15 +177,15 @@ static int stk_start_stream(struct stk_camera *dev)
if (!is_present(dev))
return -ENODEV;
if (!is_memallocd(dev) || !is_initialised(dev)) {
- STK_ERROR("FIXME: Buffers are not allocated\n");
+ pr_err("FIXME: Buffers are not allocated\n");
return -EFAULT;
}
ret = usb_set_interface(dev->udev, 0, 5);
if (ret < 0)
- STK_ERROR("usb_set_interface failed !\n");
+ pr_err("usb_set_interface failed !\n");
if (stk_sensor_wakeup(dev))
- STK_ERROR("error awaking the sensor\n");
+ pr_err("error awaking the sensor\n");
stk_camera_read_reg(dev, 0x0116, &value_116);
stk_camera_read_reg(dev, 0x0117, &value_117);
@@ -224,9 +226,9 @@ static int stk_stop_stream(struct stk_camera *dev)
unset_streaming(dev);
if (usb_set_interface(dev->udev, 0, 0))
- STK_ERROR("usb_set_interface failed !\n");
+ pr_err("usb_set_interface failed !\n");
if (stk_sensor_sleep(dev))
- STK_ERROR("error suspending the sensor\n");
+ pr_err("error suspending the sensor\n");
}
return 0;
}
@@ -313,7 +315,7 @@ static void stk_isoc_handler(struct urb *urb)
dev = (struct stk_camera *) urb->context;
if (dev == NULL) {
- STK_ERROR("isoc_handler called with NULL device !\n");
+ pr_err("isoc_handler called with NULL device !\n");
return;
}
@@ -326,14 +328,13 @@ static void stk_isoc_handler(struct urb *urb)
spin_lock_irqsave(&dev->spinlock, flags);
if (urb->status != -EINPROGRESS && urb->status != 0) {
- STK_ERROR("isoc_handler: urb->status == %d\n", urb->status);
+ pr_err("isoc_handler: urb->status == %d\n", urb->status);
goto resubmit;
}
if (list_empty(&dev->sio_avail)) {
/*FIXME Stop streaming after a while */
- (void) (printk_ratelimit() &&
- STK_ERROR("isoc_handler without available buffer!\n"));
+ pr_err_ratelimited("isoc_handler without available buffer!\n");
goto resubmit;
}
fb = list_first_entry(&dev->sio_avail,
@@ -343,8 +344,8 @@ static void stk_isoc_handler(struct urb *urb)
for (i = 0; i < urb->number_of_packets; i++) {
if (urb->iso_frame_desc[i].status != 0) {
if (urb->iso_frame_desc[i].status != -EXDEV)
- STK_ERROR("Frame %d has error %d\n", i,
- urb->iso_frame_desc[i].status);
+ pr_err("Frame %d has error %d\n",
+ i, urb->iso_frame_desc[i].status);
continue;
}
framelen = urb->iso_frame_desc[i].actual_length;
@@ -368,9 +369,8 @@ static void stk_isoc_handler(struct urb *urb)
/* This marks a new frame */
if (fb->v4lbuf.bytesused != 0
&& fb->v4lbuf.bytesused != dev->frame_size) {
- (void) (printk_ratelimit() &&
- STK_ERROR("frame %d, bytesused=%d, skipping\n",
- i, fb->v4lbuf.bytesused));
+ pr_err_ratelimited("frame %d, bytesused=%d, skipping\n",
+ i, fb->v4lbuf.bytesused);
fb->v4lbuf.bytesused = 0;
fill = fb->buffer;
} else if (fb->v4lbuf.bytesused == dev->frame_size) {
@@ -395,8 +395,7 @@ static void stk_isoc_handler(struct urb *urb)
/* Our buffer is full !!! */
if (framelen + fb->v4lbuf.bytesused > dev->frame_size) {
- (void) (printk_ratelimit() &&
- STK_ERROR("Frame buffer overflow, lost sync\n"));
+ pr_err_ratelimited("Frame buffer overflow, lost sync\n");
/*FIXME Do something here? */
continue;
}
@@ -414,8 +413,8 @@ resubmit:
urb->dev = dev->udev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret != 0) {
- STK_ERROR("Error (%d) re-submitting urb in stk_isoc_handler.\n",
- ret);
+ pr_err("Error (%d) re-submitting urb in stk_isoc_handler\n",
+ ret);
}
}
@@ -433,32 +432,31 @@ static int stk_prepare_iso(struct stk_camera *dev)
udev = dev->udev;
if (dev->isobufs)
- STK_ERROR("isobufs already allocated. Bad\n");
+ pr_err("isobufs already allocated. Bad\n");
else
dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs),
GFP_KERNEL);
if (dev->isobufs == NULL) {
- STK_ERROR("Unable to allocate iso buffers\n");
+ pr_err("Unable to allocate iso buffers\n");
return -ENOMEM;
}
for (i = 0; i < MAX_ISO_BUFS; i++) {
if (dev->isobufs[i].data == NULL) {
kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
if (kbuf == NULL) {
- STK_ERROR("Failed to allocate iso buffer %d\n",
- i);
+ pr_err("Failed to allocate iso buffer %d\n", i);
goto isobufs_out;
}
dev->isobufs[i].data = kbuf;
} else
- STK_ERROR("isobuf data already allocated\n");
+ pr_err("isobuf data already allocated\n");
if (dev->isobufs[i].urb == NULL) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL)
goto isobufs_out;
dev->isobufs[i].urb = urb;
} else {
- STK_ERROR("Killing URB\n");
+ pr_err("Killing URB\n");
usb_kill_urb(dev->isobufs[i].urb);
urb = dev->isobufs[i].urb;
}
@@ -567,7 +565,7 @@ static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs)
{
int i;
if (dev->sio_bufs != NULL)
- STK_ERROR("sio_bufs already allocated\n");
+ pr_err("sio_bufs already allocated\n");
else {
dev->sio_bufs = kzalloc(n_sbufs * sizeof(struct stk_sio_buffer),
GFP_KERNEL);
@@ -690,7 +688,7 @@ static ssize_t stk_read(struct file *fp, char __user *buf,
spin_lock_irqsave(&dev->spinlock, flags);
if (list_empty(&dev->sio_full)) {
spin_unlock_irqrestore(&dev->spinlock, flags);
- STK_ERROR("BUG: No siobufs ready\n");
+ pr_err("BUG: No siobufs ready\n");
return 0;
}
sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list);
@@ -907,7 +905,7 @@ static int stk_vidioc_g_fmt_vid_cap(struct file *filp,
stk_sizes[i].m != dev->vsettings.mode; i++)
;
if (i == ARRAY_SIZE(stk_sizes)) {
- STK_ERROR("ERROR: mode invalid\n");
+ pr_err("ERROR: mode invalid\n");
return -EINVAL;
}
pix_format->width = stk_sizes[i].w;
@@ -985,7 +983,7 @@ static int stk_setup_format(struct stk_camera *dev)
stk_sizes[i].m != dev->vsettings.mode)
i++;
if (i == ARRAY_SIZE(stk_sizes)) {
- STK_ERROR("Something is broken in %s\n", __func__);
+ pr_err("Something is broken in %s\n", __func__);
return -EFAULT;
}
/* This registers controls some timings, not sure of what. */
@@ -1241,7 +1239,7 @@ static void stk_v4l_dev_release(struct video_device *vd)
struct stk_camera *dev = vdev_to_camera(vd);
if (dev->sio_bufs != NULL || dev->isobufs != NULL)
- STK_ERROR("We are leaking memory\n");
+ pr_err("We are leaking memory\n");
usb_put_intf(dev->interface);
kfree(dev);
}
@@ -1264,10 +1262,10 @@ static int stk_register_video_device(struct stk_camera *dev)
video_set_drvdata(&dev->vdev, dev);
err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
if (err)
- STK_ERROR("v4l registration failed\n");
+ pr_err("v4l registration failed\n");
else
- STK_INFO("Syntek USB2.0 Camera is now controlling device %s\n",
- video_device_node_name(&dev->vdev));
+ pr_info("Syntek USB2.0 Camera is now controlling device %s\n",
+ video_device_node_name(&dev->vdev));
return err;
}
@@ -1288,7 +1286,7 @@ static int stk_camera_probe(struct usb_interface *interface,
dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL);
if (dev == NULL) {
- STK_ERROR("Out of memory !\n");
+ pr_err("Out of memory !\n");
return -ENOMEM;
}
err = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
@@ -1352,7 +1350,7 @@ static int stk_camera_probe(struct usb_interface *interface,
}
}
if (!dev->isoc_ep) {
- STK_ERROR("Could not find isoc-in endpoint");
+ pr_err("Could not find isoc-in endpoint\n");
err = -ENODEV;
goto error;
}
@@ -1387,8 +1385,8 @@ static void stk_camera_disconnect(struct usb_interface *interface)
wake_up_interruptible(&dev->wait_frame);
- STK_INFO("Syntek USB2.0 Camera release resources device %s\n",
- video_device_node_name(&dev->vdev));
+ pr_info("Syntek USB2.0 Camera release resources device %s\n",
+ video_device_node_name(&dev->vdev));
video_unregister_device(&dev->vdev);
v4l2_ctrl_handler_free(&dev->hdl);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 0284120ce246..5cecbdc97573 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -31,12 +31,6 @@
#define ISO_MAX_FRAME_SIZE 3 * 1024
#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
-
-#define PREFIX "stkwebcam: "
-#define STK_INFO(str, args...) printk(KERN_INFO PREFIX str, ##args)
-#define STK_ERROR(str, args...) printk(KERN_ERR PREFIX str, ##args)
-#define STK_WARNING(str, args...) printk(KERN_WARNING PREFIX str, ##args)
-
struct stk_iso_buf {
void *data;
int length;
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index e48b7c032c95..8db45dfc271b 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -43,8 +43,6 @@
#define UNSET (-1U)
-#define PREFIX (t->i2c->dev.driver->name)
-
/*
* Driver modprobe parameters
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8621a198a2ce..bac33311f55a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/*
+ * The MEI device needs to resume from runtime suspend
+ * in order to perform a link reset flow upon system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+ /*
* For not wake-able HW runtime pm framework
* can't be used on pci device level.
* Use domain runtime pm callbacks instead.
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f811cd524468..e38a5f144373 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/*
+ * The MEI device needs to resume from runtime suspend
+ * in order to perform a link reset flow upon system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+ /*
* For not wake-able HW runtime pm framework
* can't be used on pci device level.
* Use domain runtime pm callbacks instead.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8ac59dc80f23..f1bbfd389367 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2170,6 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
+ spin_lock_irq(md->queue.queue->queue_lock);
+ queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+ spin_unlock_irq(md->queue.queue->queue_lock);
blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4ffea14b7eb6..2bae69e39544 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1289,7 +1289,7 @@ out_err:
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- int err = 0;
+ int err = -EINVAL;
u8 val;
if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9dfb26972f2..250dc6ec4c82 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
}
/* find out number of slots supported */
- if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
+ if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
dev_info(dev, "'num-slots' was deprecated.\n");
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7c12f3715676..2ab4788d021f 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
struct mmc_host *mmc = host->mmc;
int ret = 0;
- if (mmc_pdata(host)->set_power)
- return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
-
/*
* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
@@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- if (mmc_pdata(host)->before_set_reg)
- mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
-
ret = omap_hsmmc_set_pbias(host, false, 0);
if (ret)
return ret;
@@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
return ret;
}
- if (mmc_pdata(host)->after_set_reg)
- mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
-
return 0;
err_set_voltage:
@@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
int ret;
struct mmc_host *mmc = host->mmc;
- if (mmc_pdata(host)->set_power)
- return 0;
ret = mmc_regulator_get_supply(mmc);
if (ret == -EPROBE_DEFER)
@@ -2097,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps |= mmc_pdata(host)->caps;
if (mmc->caps & MMC_CAP_8_BIT_DATA)
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7611fd679f1a..1485530c3592 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -31,6 +31,7 @@
#define SDMMC_MC1R 0x204
#define SDMMC_MC1R_DDR BIT(3)
+#define SDMMC_MC1R_FCD BIT(7)
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
struct clk *mainck;
};
+static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
+{
+ u8 mc1r;
+
+ mc1r = readb(host->ioaddr + SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_FCD;
+ writeb(mc1r, host->ioaddr + SDMMC_MC1R);
+}
+
static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
sdhci_set_uhs_signaling(host, timing);
}
+static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ sdhci_at91_set_force_card_detect(host);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_at91_reset,
.set_uhs_signaling = sdhci_at91_set_uhs_signaling,
.set_power = sdhci_at91_set_power,
};
@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
+ /*
+ * If the device attached to the MMC bus is not removable, it is safer
+ * to set the Force Card Detect bit. People often don't connect the
+ * card detect signal and use this pin for another purpose. If the card
+ * detect pin is not muxed to the SDHCI controller, a default value is
+ * used. This default can differ from one SoC revision to another, and
+ * problems arise when it does not report a card as present. To avoid
+ * this, if the device is non-removable the card detection procedure
+ * using the SDMCC_CD signal is bypassed.
+ * This bit is cleared by a software reset for all commands, so we need
+ * our own reset function to set it back.
+ */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ sdhci_at91_set_force_card_detect(host);
+
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d6fa2214aaae..0fb4e4c119e1 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
}
mmc_writel(host, REG_CLKCR, rval);
- if (host->cfg->needs_new_timings)
- mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
+ if (host->cfg->needs_new_timings) {
+ /* Don't touch the delay bits */
+ rval = mmc_readl(host, REG_SD_NTSR);
+ rval |= SDXC_2X_TIMING_MODE;
+ mmc_writel(host, REG_SD_NTSR, rval);
+ }
ret = sunxi_mmc_clk_set_phase(host, ios, rate);
if (ret)
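
REG_SD_NTSR also carries delay fields, so the fix switches from a blind write to read-modify-write. The generic shape, as a hypothetical helper:

	static inline void reg_set_bits(void __iomem *addr, u32 bits)
	{
		u32 val = readl(addr);	/* keep the other fields intact */

		writel(val | bits, addr);
	}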
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f336a9b85576..9ec8f033ac5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
return BLK_STS_IOERR;
+ return BLK_STS_OK;
default:
return BLK_STS_IOERR;
}
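
Without the added return, the write case fell through into the default arm and reported BLK_STS_IOERR even after every sector was written. A minimal reproduction of the bug class (do_write() is hypothetical):

	switch (op) {
	case REQ_OP_WRITE:
		if (do_write())
			return BLK_STS_IOERR;
		return BLK_STS_OK;	/* the statement that was missing */
	default:
		return BLK_STS_IOERR;
	}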
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index d922a88e407f..2c8baa0c2c4e 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
* tRC < 30ns implies EDO mode. This controller does not support this
* mode.
*/
- if (conf->timings.sdr.tRC_min < 30)
+ if (conf->timings.sdr.tRC_min < 30000)
return -ENOTSUPP;
atmel_smc_cs_conf_init(smcconf);
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 55a8ee5306ea..8c210a5776bc 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
*/
struct platform_device *pdev = to_platform_device(userdev);
const struct atmel_pmecc_caps *caps;
+ const struct of_device_id *match;
/* No PMECC engine available. */
if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
caps = &at91sam9g45_caps;
- /*
- * Try to find the NFC subnode and extract the associated caps
- * from there.
- */
- np = of_find_compatible_node(userdev->of_node, NULL,
- "atmel,sama5d3-nfc");
- if (np) {
- const struct of_device_id *match;
-
- match = of_match_node(atmel_pmecc_legacy_match, np);
- if (match && match->data)
- caps = match->data;
-
- of_node_put(np);
- }
+ /* Find the caps associated to the NAND dev node. */
+ match = of_match_node(atmel_pmecc_legacy_match,
+ userdev->of_node);
+ if (match && match->data)
+ caps = match->data;
pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5fa5ddc94834..c6c18b82f8f4 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
if (!section) {
oobregion->offset = 0;
- oobregion->length = 4;
+ if (mtd->oobsize == 16)
+ oobregion->length = 4;
+ else
+ oobregion->length = 3;
} else {
+ if (mtd->oobsize == 8)
+ return -ERANGE;
+
oobregion->offset = 6;
oobregion->length = ecc->total - 4;
}
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
* Ensure the timing mode has been changed on the chip side
* before changing timings on the controller side.
*/
- if (chip->onfi_version) {
+ if (chip->onfi_version &&
+ (le16_to_cpu(chip->onfi_params.opt_cmd) &
+ ONFI_OPT_CMD_SET_GET_FEATURES)) {
u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
chip->onfi_timing_mode_default,
};
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
* @buf: the data to write
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
- * @cached: cached programming
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index f06312df3669..7e36d7d13c26 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
struct nand_sdr_timings *timings = &iface->timings.sdr;
/* microseconds -> picoseconds */
- timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
- timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
- timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+ timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
+ timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
+ timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
/* nanoseconds -> picoseconds */
timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
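
The t_prog/t_bers/t_r fields are 16-bit, and with a 32-bit unsigned long the expression 1000000UL * 0xffff is a 32-bit multiplication that wraps; the full product needs 37 bits. A sketch of the difference:

	u16 t_prog = 0xffff;			/* worst-case ONFI value */
	u64 bad  = 1000000UL  * t_prog;		/* wraps to 1110490560 when long is 32-bit */
	u64 good = 1000000ULL * t_prog;		/* 64-bit mul: 65535000000 */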
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index d0b6f8f9f297..6abd142b1324 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
*/
chip->clk_rate = NSEC_PER_SEC / min_clk_period;
real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+ if (real_clk_rate <= 0) {
+ dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+ return -EINVAL;
+ }
/*
* ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 181839d6fbea..9bee6c1c70cc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2050,6 +2050,7 @@ static int bond_miimon_inspect(struct bonding *bond)
continue;
bond_propose_link_state(slave, BOND_LINK_FAIL);
+ commit++;
slave->delay = bond->params.downdelay;
if (slave->delay) {
netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2088,6 +2089,7 @@ static int bond_miimon_inspect(struct bonding *bond)
continue;
bond_propose_link_state(slave, BOND_LINK_BACK);
+ commit++;
slave->delay = bond->params.updelay;
if (slave->delay) {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1e46418a3b74..264b281eb86b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
* all finished.
*/
mt7623_pad_clk_setup(ds);
+ } else {
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
+ u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ break;
+ }
+
+ if (phydev->link)
+ mcr |= PMCR_FORCE_LNK;
+
+ if (phydev->duplex) {
+ mcr |= PMCR_FORCE_FDX;
+
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= PMCR_TX_FC_EN;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= PMCR_RX_FC_EN;
+ }
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr);
}
}
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b83d76b99802..74db9822eb40 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
#define PMCR_TX_FC_EN BIT(5)
#define PMCR_RX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
+#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 86058a9f3417..1d307f2def2d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_enet_gpiod_get(pdata);
- if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
- pdata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->clk)) {
+ pdata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
/* Abort if the clock is defined but couldn't be
* retrived. Always abort if the clock is missing on
* DT system as the driver can't cope with this case.
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 041cfb7952f8..e94159507847 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
mac_mode |= HALF_DUPLEX;
if (gigabit) {
- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_is_rgmii(dev->phydev))
mac_mode |= RGMII_MODE;
mac_mode |= GMAC_MODE;
@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
break;
case PHY_INTERFACE_MODE_RGMII:
- pad_mode = PAD_MODE_RGMII;
- break;
-
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+ pad_mode = PAD_MODE_RGMII;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..a1125d10c825 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
spin_lock_init(&bp->lock);
+ u64_stats_init(&bp->hw_stats.syncp);
bp->rx_pending = B44_DEF_RX_RING_PENDING;
bp->tx_pending = B44_DEF_TX_RING_PENDING;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5333601f855f..dc3052751bc1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
p = (char *)&dev->stats;
else
p = (char *)priv;
+
+ if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+
p += s->stat_offset;
data[j] = *(unsigned long *)p;
j++;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 7b0b399aaedd..a981c4ee9d72 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3669,7 +3669,7 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev);
+ bcmgenet_mii_config(priv->dev, false);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b9344de669f8..3a34fdba5301 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -698,7 +698,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 071fcbd14e6a..30cb97b4a1d7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -238,7 +238,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_fixed_phy_link_update);
}
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -327,7 +327,8 @@ int bcmgenet_mii_config(struct net_device *dev)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- dev_info_once(kdev, "configuring instance for %s\n", phy_name);
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
@@ -375,7 +376,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev);
+ ret = bcmgenet_mii_config(dev, true);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 79112563a25a..5e5c4d7796b8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
u64 cmr_cfg;
u64 port_cfg = 0;
u64 misc_ctl = 0;
+ bool tx_en, rx_en;
cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
- cmr_cfg &= ~CMR_EN;
+ tx_en = cmr_cfg & CMR_PKT_TX_EN;
+ rx_en = cmr_cfg & CMR_PKT_RX_EN;
+ cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+ GMI_PORT_CFG_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
+ bgx->bgx_id, lmac->lmacid);
+ return;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+ GMI_PORT_CFG_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
+ bgx->bgx_id, lmac->lmacid);
+ return;
+ }
+
port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
@@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
- port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
-
- /* Re-enable lmac */
- cmr_cfg |= CMR_EN;
+ /* Restore CMR config settings */
+ cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
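
Before touching the port configuration, the new code drains traffic: packet RX/TX are disabled, then the GMI port-config idle bits are polled. The general polling shape (a hypothetical helper; the driver uses its own bgx_poll_reg()):

	static int wait_for_bit(void __iomem *reg, u64 bit, int tries)
	{
		while (tries--) {
			if (readq(reg) & bit)
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}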
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 6b7fe6fdd13b..23acdc5ab896 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -170,6 +170,8 @@
#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
+#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 95bf5e89cfd1..34dae51effd4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
priv->base + FTGMAC100_OFFSET_MACCR);
- for (i = 0; i < 50; i++) {
+ for (i = 0; i < 200; i++) {
unsigned int maccr;
maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
@@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
struct net_device *netdev = priv->netdev;
struct sk_buff *skb;
dma_addr_t map;
- int err;
+ int err = 0;
skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
if (unlikely(!skb)) {
@@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
else
rxdes->rxdes0 = 0;
- return 0;
+ return err;
}
static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
@@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
+ priv->mii_bus->parent = priv->dev;
priv->mii_bus->priv = priv->netdev;
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a3e694679635..c45e8e3b82d3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
struct device *dev = &adapter->vdev->dev;
+ int rc;
do {
if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
dev_err(dev, "Capabilities query timeout\n");
return -1;
}
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev,
+ "Initialization of SCRQ's failed\n");
+ return -1;
+ }
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev,
+ "Initialization of SCRQ's irqs failed\n");
+ return -1;
+ }
}
reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
ibmvnic_send_req_caps(adapter, 1);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b936febc315a..2194960d5855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
/* add u32 for head writeback, align after this takes care of
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 084c53582793..032f8ac06357 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
if (!tx_ring->tx_buffer_info)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
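
Each of these hunks seeds the per-ring u64_stats_sync at ring setup. On 32-bit SMP the syncp is a real seqcount, and readers must pair with it, roughly as below (field names assumed from the drivers):

	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));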
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5794d98d946f..9c94ea9b2b80 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
ppd.shared = pdev;
memset(&res, 0, sizeof(res));
- if (!of_irq_to_resource(pnp, 0, &res)) {
+ if (of_irq_to_resource(pnp, 0, &res) <= 0) {
dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b3d0c2e6347a..e588a0cdb074 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -22,6 +22,7 @@
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
+#include <linux/interrupt.h>
#include "mtk_eth_soc.h"
@@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
RX_DMA_FPORT_MASK;
mac--;
+ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+ !eth->netdev[mac]))
+ goto release_desc;
+
netdev = eth->netdev[mac];
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c751a1d434ad..3d4e4a5d00d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_caps *caps = &priv->mdev->dev->caps;
int err = 0;
u64 config = 0;
u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
MLX4_DEV_CAP_FLAG_WOL_PORT2;
- if (!(priv->mdev->dev->caps.flags & mask)) {
+ if (!(caps->flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
}
+ if (caps->wol_port[priv->port])
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+
err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
if (err) {
en_err(priv, "Failed to get WoL information\n");
return;
}
- if (config & MLX4_EN_WOL_MAGIC)
- wol->supported = WAKE_MAGIC;
- else
- wol->supported = 0;
-
- if (config & MLX4_EN_WOL_ENABLED)
+ if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
wol->wolopts = WAKE_MAGIC;
else
wol->wolopts = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 436f7689a032..bf1638044a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
* header, the HW adds it. To address that, we are subtracting the pseudo
* header checksum from the checksum value provided by the HW.
*/
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
- struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct iphdr *iph)
{
__u16 length_for_csum = 0;
__wsum csum_pseudo_header = 0;
+ __u8 ipproto = iph->protocol;
+
+ if (unlikely(ipproto == IPPROTO_SCTP))
+ return -1;
length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
- length_for_csum, iph->protocol, 0);
+ length_for_csum, ipproto, 0);
skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+ return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
+ __u8 nexthdr = ipv6h->nexthdr;
__wsum csum_pseudo_hdr = 0;
- if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
- ipv6h->nexthdr == IPPROTO_HOPOPTS))
+ if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+ nexthdr == IPPROTO_HOPOPTS ||
+ nexthdr == IPPROTO_SCTP))
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+ (__force __wsum)htons(nexthdr));
skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
}
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
- else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
- return -1;
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+ return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
return 0;
}
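
check_csum() now propagates the per-family result, and both helpers bail out for SCTP: SCTP uses CRC32c, which cannot be derived from the ones'-complement sum the hardware provides. For the IPv4 path, the arithmetic being kept is (same kernel helpers as the hunk):

	__wsum pseudo = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					   length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, pseudo);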
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 37e84a59e751..041c0ed65929 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
[34] = "DMFS Sniffer support (UC & MC)",
- [35] = "QinQ VST mode support",
- [36] = "sl to vl mapping table change event support"
+ [35] = "Diag counters per port",
+ [36] = "QinQ VST mode support",
+ [37] = "sl to vl mapping table change event support",
};
int i;
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
+#define QUERY_DEV_CAP_WOL_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+ dev_cap->wol_port[1] = !!(field & 0x20);
+ dev_cap->wol_port[2] = !!(field & 0x40);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 5343a0599253..b52ba01aa486 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+ bool wol_port[MLX4_MAX_PORTS + 1];
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a27c9c13a36e..09b9bc17bce9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ dev->caps.wol_port[1] = dev_cap->wol_port[1];
+ dev->caps.wol_port[2] = dev_cap->wol_port[2];
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f5a2c605749f..31cbe5e86a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_msg *msg);
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work)
struct semaphore *sem;
unsigned long flags;
bool poll_cmd = ent->polling;
+ int alloc_ret;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
- ent->idx = alloc_ent(cmd);
- if (ent->idx < 0) {
+ alloc_ret = alloc_ent(cmd);
+ if (alloc_ret < 0) {
mlx5_core_err(dev, "failed to allocate command entry\n");
+ if (ent->callback) {
+ ent->callback(-EAGAIN, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ free_cmd(ent);
+ } else {
+ ent->ret = -EAGAIN;
+ complete(&ent->done);
+ }
up(sem);
return;
}
+ ent->idx = alloc_ret;
} else {
ent->idx = cmd->max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
- goto out_free;
+ goto out;
ds = ent->ts2 - ent->ts1;
op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
ent->idx);
free_ent(cmd, ent->idx);
+ free_cmd(ent);
}
continue;
}
@@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
free_msg(dev, ent->in);
err = err ? err : ent->status;
- free_cmd(ent);
+ if (!forced)
+ free_cmd(ent);
callback(err, context);
} else {
complete(&ent->done);
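
Note: the cmd_work_handler() hunk above encodes a general rule for asynchronous command engines — a request that fails resource allocation must still be completed, through whichever path its issuer is waiting on. A minimal sketch of the two completion paths, with hypothetical request/callback names (this is not the mlx5 API):

#include <linux/completion.h>
#include <linux/slab.h>

struct req {
	void (*callback)(int status, void *ctx);	/* NULL for sync callers */
	void *ctx;
	int ret;
	struct completion done;
};

/* Fail a request that never reached the hardware. */
static void fail_request(struct req *r, int err)
{
	if (r->callback) {
		r->callback(err, r->ctx);	/* async: report, then release */
		kfree(r);
	} else {
		r->ret = err;			/* sync: record and wake waiter */
		complete(&r->done);
	}
}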
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e1b7ddfecd01..0039b4725405 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -266,6 +266,14 @@ struct mlx5e_dcbx {
};
#endif
+#define MAX_PIN_NUM 8
+struct mlx5e_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ struct work_struct out_work;
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+};
+
struct mlx5e_tstamp {
rwlock_t lock;
struct cyclecounter cycles;
@@ -277,7 +285,7 @@ struct mlx5e_tstamp {
struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
- u8 *pps_pin_caps;
+ struct mlx5e_pps pps_info;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 66f432385dbb..84dd63e74041 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -53,6 +53,15 @@ enum {
MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};
+enum {
+ MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
+ MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
+ MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
+ MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
+ MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
+ MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+};
+
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
struct skb_shared_hwtstamps *hwts)
{
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
}
+static void mlx5e_pps_out(struct work_struct *work)
+{
+ struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
+ out_work);
+ struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
+ pps_info);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+ u64 tstart;
+
+ write_lock_irqsave(&tstamp->lock, flags);
+ tstart = tstamp->pps_info.start[i];
+ tstamp->pps_info.start[i] = 0;
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ if (!tstart)
+ continue;
+
+ MLX5_SET(mtpps_reg, in, pin, i);
+ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+ MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
+ mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
+ }
+}
+
static void mlx5e_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
overflow_work);
+ struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
unsigned long flags;
write_lock_irqsave(&tstamp->lock, flags);
timecounter_read(&tstamp->clock);
write_unlock_irqrestore(&tstamp->lock, flags);
- schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+ queue_delayed_work(priv->wq, &tstamp->overflow_work,
+ msecs_to_jiffies(tstamp->overflow_period * 1000));
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
-
- if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
- /* For future use need to add a loop for finding all 1PPS out pins */
- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
- MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
-
- mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- }
if (delta < 0) {
neg_adj = 1;
@@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u32 field_select = 0;
+ u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_CAP_GEN(priv->mdev, pps) ||
- !MLX5_CAP_GEN(priv->mdev, pps_modify))
+ if (!MLX5_PPS_CAP(priv->mdev))
return -EOPNOTSUPP;
if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
+ pin_mode = MLX5E_PIN_MODE_IN;
+ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+ field_select = MLX5E_MTPPS_FS_PIN_MODE |
+ MLX5E_MTPPS_FS_PATTERN |
+ MLX5E_MTPPS_FS_ENABLE;
+ } else {
+ pin = rq->extts.index;
+ field_select = MLX5E_MTPPS_FS_ENABLE;
}
- if (rq->extts.flags & PTP_FALLING_EDGE)
- pattern = 1;
-
MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
if (err)
@@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp;
+ u64 nsec_now, nsec_delta, time_stamp = 0;
u64 cycles_now, cycles_delta;
struct timespec64 ts;
unsigned long flags;
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
int pin = -1;
+ int err = 0;
s64 ns;
- if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+ if (!MLX5_PPS_CAP(priv->mdev))
return -EOPNOTSUPP;
if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
rq->perout.index);
if (pin < 0)
return -EBUSY;
- }
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
- if (on)
+ pin_mode = MLX5E_PIN_MODE_OUT;
+ pattern = MLX5E_OUT_PATTERN_PERIODIC;
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+
if ((ns >> 1) != 500000000LL)
return -EINVAL;
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- write_unlock_irqrestore(&tstamp->lock, flags);
- time_stamp = cycles_now + cycles_delta;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
+ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+ write_lock_irqsave(&tstamp->lock, flags);
+ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+ tstamp->cycles.mult);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ field_select = MLX5E_MTPPS_FS_PIN_MODE |
+ MLX5E_MTPPS_FS_PATTERN |
+ MLX5E_MTPPS_FS_ENABLE |
+ MLX5E_MTPPS_FS_TIME_STAMP;
+ } else {
+ pin = rq->perout.index;
+ field_select = MLX5E_MTPPS_FS_ENABLE;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
- MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ if (err)
+ return err;
- return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ return mlx5_set_mtppse(priv->mdev, pin, 0,
+ MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5e_tstamp *tstamp =
+ container_of(ptp, struct mlx5e_tstamp, ptp_info);
+
+ tstamp->pps_info.enabled = !!on;
+ return 0;
}
static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
@@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
return mlx5e_extts_configure(ptp, rq, on);
case PTP_CLK_REQ_PEROUT:
return mlx5e_perout_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return mlx5e_pps_configure(ptp, rq, on);
default:
return -EOPNOTSUPP;
}
@@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
return -ENOMEM;
tstamp->ptp_info.enable = mlx5e_ptp_enable;
tstamp->ptp_info.verify = mlx5e_ptp_verify;
+ tstamp->ptp_info.pps = 1;
for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
- tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
- tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
- tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
- tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
- tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
- tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
- tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
- tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+ tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5e_tstamp *tstamp = &priv->tstamp;
+ struct timespec64 ts;
+ u64 nsec_now, nsec_delta;
+ u64 cycles_now, cycles_delta;
+ int pin = event->index;
+ s64 ns;
+ unsigned long flags;
- ptp_clock_event(tstamp->ptp, event);
+ switch (tstamp->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ if (tstamp->pps_info.enabled) {
+ event->type = PTP_CLOCK_PPSUSR;
+ event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
+ } else {
+ event->type = PTP_CLOCK_EXTTS;
+ }
+ ptp_clock_event(tstamp->ptp, event);
+ break;
+ case PTP_PF_PEROUT:
+ mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
+ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+ ts.tv_sec += 1;
+ ts.tv_nsec = 0;
+ ns = timespec64_to_ns(&ts);
+ write_lock_irqsave(&tstamp->lock, flags);
+ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+ tstamp->cycles.mult);
+ tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
+ queue_work(priv->wq, &tstamp->pps_info.out_work);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ break;
+ default:
+ netdev_err(netdev, "%s: Unhandled event\n", __func__);
+ }
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
do_div(ns, NSEC_PER_SEC / 2 / HZ);
tstamp->overflow_period = ns;
+ INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
if (tstamp->overflow_period)
- schedule_delayed_work(&tstamp->overflow_work, 0);
+ queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
else
mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
@@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
/* Initialize 1PPS data structures */
-#define MAX_PIN_NUM 8
- tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
- if (tstamp->pps_pin_caps) {
- if (MLX5_CAP_GEN(priv->mdev, pps))
- mlx5e_get_pps_caps(priv, tstamp);
- if (tstamp->ptp_info.n_pins)
- mlx5e_init_pin_config(tstamp);
- } else {
- mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
- }
+ if (MLX5_PPS_CAP(priv->mdev))
+ mlx5e_get_pps_caps(priv, tstamp);
+ if (tstamp->ptp_info.n_pins)
+ mlx5e_init_pin_config(tstamp);
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
@@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
priv->tstamp.ptp = NULL;
}
- kfree(tstamp->pps_pin_caps);
- kfree(tstamp->ptp_info.pin_config);
-
+ cancel_work_sync(&tstamp->pps_info.out_work);
cancel_delayed_work_sync(&tstamp->overflow_work);
+ kfree(tstamp->ptp_info.pin_config);
}
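
Note: both the 1PPS out path and mlx5e_pps_event_handler() above turn a future nanosecond deadline into a raw device cycle count by inverting the timecounter relation ns = (cycles * mult) >> shift. The arithmetic in isolation, as a hedged sketch (div64_u64() is the real helper; the wrapper function is illustrative):

#include <linux/math64.h>

/* Convert a nanosecond delta to clock cycles: inverse of
 * ns = (cycles * mult) >> shift. Assumes nsec_delta << shift
 * does not overflow 64 bits.
 */
static u64 ns_delta_to_cycles(u64 nsec_delta, u32 mult, u32 shift)
{
	return div64_u64(nsec_delta << shift, mult);
}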
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index bdd82c9b3992..eafc59280ada 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
static bool outer_header_zero(u32 *match_criteria)
{
- int size = MLX5_ST_SZ_BYTES(fte_match_param);
+ int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1eac5003084f..57f31fa478ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
break;
case MLX5_DEV_EVENT_PPS:
eqe = (struct mlx5_eqe *)param;
- ptp_event.type = PTP_CLOCK_EXTTS;
ptp_event.index = eqe->data.pps.pin;
ptp_event.timestamp =
timecounter_cyc2time(&priv->tstamp.clock,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index af51a5d2b912..52b9a64cd3a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
- if (MLX5_CAP_GEN(dev, pps))
+ if (MLX5_PPS_CAP(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
if (MLX5_CAP_GEN(dev, fpga))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 89bfda419efe..8b18cc9ec026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
- MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
+ esw->mode == SRIOV_NONE)
return;
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1ee5bce85901..85298051a3e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -178,8 +178,6 @@ out:
static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
- mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
-
mlx5_core_destroy_qp(mdev, qp);
}
@@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
-
err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
+ struct mlx5i_priv *ipriv = priv->ppriv;
int err;
err = mlx5e_create_indirect_rqt(priv);
@@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5i_create_flow_steering(priv);
+ err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
if (err)
goto err_destroy_direct_tirs;
+ err = mlx5i_create_flow_steering(priv);
+ if (err)
+ goto err_remove_rx_underlay_qpn;
+
return 0;
+err_remove_rx_underlay_qpn:
+ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
@@ -290,6 +293,9 @@ err_destroy_indirect_rqts:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index a3a836bdcfd2..f26f97fe4666 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- if (tracker->netdev_state[0].tx_enabled) {
- *port1 = 1;
- *port2 = 1;
- } else {
- *port1 = 2;
- *port2 = 2;
- }
- } else {
- *port1 = 1;
- *port2 = 2;
- if (!tracker->netdev_state[0].link_up)
- *port1 = 2;
- else if (!tracker->netdev_state[1].link_up)
- *port2 = 1;
+ *port1 = 1;
+ *port2 = 2;
+ if (!tracker->netdev_state[0].tx_enabled ||
+ !tracker->netdev_state[0].link_up) {
+ *port1 = 2;
+ return;
}
+
+ if (!tracker->netdev_state[1].tx_enabled ||
+ !tracker->netdev_state[1].link_up)
+ *port2 = 1;
}
static void mlx5_activate_lag(struct mlx5_lag *ldev,
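
Note: the rewritten mlx5_infer_tx_affinity_mapping() collapses the active-backup and hash cases into one rule — start from the identity mapping, then steer traffic away from any port that is down or has TX disabled. The same decision table as a standalone sketch:

#include <linux/types.h>

/* Identity mapping unless a port is unusable, then fall back to its peer;
 * p1_ok/p2_ok stand for "tx_enabled && link_up" on each slave.
 */
static void infer_tx_affinity(bool p1_ok, bool p2_ok, u8 *port1, u8 *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!p1_ok) {
		*port1 = 2;	/* everything to port 2 */
		return;
	}
	if (!p2_ok)
		*port2 = 1;	/* everything to port 1 */
}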
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6a3d6bef7dd4..6a263e8d883a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
+ MLX5_CAP_GEN((mdev), pps_modify) && \
+ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
+ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
+
int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
void mlx5e_init(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index bcdf7779c48d..bf99d40e30b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
int vf;
if (!sriov->enabled_vfs)
+#ifdef CONFIG_MLX5_CORE_EN
+ goto disable_sriov_resources;
+#else
return;
+#endif
for (vf = 0; vf < sriov->num_vfs; vf++) {
if (!sriov->vfs_ctx[vf].enabled)
@@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
#ifdef CONFIG_MLX5_CORE_EN
+disable_sriov_resources:
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 383fef5a8e24..4b2e0fd7d51e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1512,6 +1512,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry);
+static bool
+mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
+ const struct mlxsw_sp_fib_entry *fib_entry);
+
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -1520,6 +1524,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
+ fib_entry))
+ continue;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 656b2d3f1bee..5eb1606765c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
if (!bridge_port->bridge_device->multicast_enabled)
return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
+ if (!mlxsw_sp_port_vlan)
+ return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
- if (WARN_ON(!bridge_port))
- return -EINVAL;
+ if (!bridge_port)
+ return 0;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
+ if (!mlxsw_sp_port_vlan)
+ return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
}
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_mid *mid, *tmp;
+
+ list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+ list_del(&mid->list);
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ kfree(mid);
+ }
+}
+
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+ mlxsw_sp_mids_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge);
}
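
Note: mlxsw_sp_mids_fini() above is the canonical free-while-iterating pattern — list_for_each_entry_safe() caches the successor so the current node can be unlinked and freed mid-walk. Generic form, assuming an entry type with an embedded list_head:

#include <linux/list.h>
#include <linux/slab.h>

struct entry {
	struct list_head list;
	/* payload */
};

static void drain_list(struct list_head *head)
{
	struct entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, head, list) {
		list_del(&e->list);	/* unlink first */
		kfree(e);		/* then it is safe to free */
	}
}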
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18750ff0ede6..4631ca8b8eb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
tx_ring->idx = idx;
tx_ring->r_vec = r_vec;
tx_ring->is_xdp = is_xdp;
+ u64_stats_init(&tx_ring->r_vec->tx_sync);
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->idx = idx;
rx_ring->r_vec = r_vec;
+ u64_stats_init(&rx_ring->r_vec->rx_sync);
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9da91045d167..3eb241657368 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
- if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
goto err;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 22cf6353ba04..7ecf549c7f1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
int i;
- for (i = 0; i < 23; i++)
+ for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
if ((i < 12) || (i > 17))
reg_space[DMA_BUS_MODE / 4 + i] =
readl(ioaddr + DMA_BUS_MODE + i * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index eef2f222ce9a..6502b9aa3bf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
int i;
- for (i = 0; i < 9; i++)
+ for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
reg_space[DMA_BUS_MODE / 4 + i] =
readl(ioaddr + DMA_BUS_MODE + i * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 9091df86723a..adc54006f884 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -136,6 +136,9 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+#define NUM_DWMAC100_DMA_REGS 9
+#define NUM_DWMAC1000_DMA_REGS 23
+
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index babb39c646ff..af30b4857c3b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -33,6 +33,8 @@
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
+#define ETHTOOL_DMA_OFFSET 55
+
struct stmmac_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
priv->hw->mac->dump_regs(priv->hw, reg_space);
priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
+ /* Copy DMA registers to where ethtool expects them */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
}
static void
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 3af540adb3c5..fca1bca7f69d 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -13,9 +13,9 @@
/* Happy Meal global registers. */
#define GREG_SWRESET 0x000UL /* Software Reset */
#define GREG_CFG 0x004UL /* Config Register */
-#define GREG_STAT 0x108UL /* Status */
-#define GREG_IMASK 0x10cUL /* Interrupt Mask */
-#define GREG_REG_SIZE 0x110UL
+#define GREG_STAT 0x100UL /* Status */
+#define GREG_IMASK 0x104UL /* Interrupt Mask */
+#define GREG_REG_SIZE 0x108UL
/* Global reset register. */
#define GREG_RESET_ETX 0x01
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 32279d21c836..c2121d214f08 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,9 +31,18 @@
#include "cpts.h"
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+ unsigned long tmo;
+};
+
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+ u16 ts_seqid, u8 ts_msgtype);
+
static int event_expired(struct cpts_event *event)
{
return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff *skb, *tmp;
+ u16 seqid;
+ u8 mtype;
+ bool found = false;
+
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+ /* no need to grab txq.lock as access is always done under cpts->lock */
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ unsigned int class = ptp_classify_raw(skb);
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (cpts_match(skb, class, seqid, mtype)) {
+ u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+ mtype, seqid);
+ } else if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout any expired skbs over 1s */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype %u seqid %04x\n",
+ mtype, seqid);
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ return found;
+}
+
/*
* Returns zero if matching event type was found.
*/
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
event->low = lo;
type = event_type(event);
switch (type) {
+ case CPTS_EV_TX:
+ if (cpts_match_tx_ts(cpts, event)) {
+ /* if the new event matches an existing skb,
+ * then don't queue it
+ */
+ break;
+ }
case CPTS_EV_PUSH:
case CPTS_EV_RX:
- case CPTS_EV_TX:
list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events);
break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ unsigned long delay = cpts->ov_check_period;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ return (long)delay;
+}
+
static struct ptp_clock_info cpts_info = {
.owner = THIS_MODULE,
.name = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
.gettime64 = cpts_ptp_gettime,
.settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
+ .do_aux_work = cpts_overflow_check,
};
-static void cpts_overflow_check(struct work_struct *work)
-{
- struct timespec64 ts;
- struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
- cpts_ptp_gettime(&cpts->info, &ts);
- pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
u16 ts_seqid, u8 ts_msgtype)
{
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
return 0;
spin_lock_irqsave(&cpts->lock, flags);
- cpts_fifo_read(cpts, CPTS_EV_PUSH);
+ cpts_fifo_read(cpts, -1);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
break;
}
}
+
+ if (ev_type == CPTS_EV_TX && !ns) {
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+ /* Not found, add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ __skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
{
int err, i;
+ skb_queue_head_init(&cpts->txq);
INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool);
for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
}
cpts->phc_index = ptp_clock_index(cpts->clock);
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+ ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
return 0;
err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
if (WARN_ON(!cpts->clock))
return;
- cancel_delayed_work_sync(&cpts->overflow_work);
-
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
+ /* Drop all queued packets */
+ skb_queue_purge(&cpts->txq);
+
clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs;
spin_lock_init(&cpts->lock);
- INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
ret = cpts_of_parse(cpts, node);
if (ret)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 01ea82ba9cdc..73d73faf0f38 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -119,13 +119,13 @@ struct cpts {
u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc;
struct timecounter tc;
- struct delayed_work overflow_work;
int phc_index;
struct clk *refclk;
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period;
+ struct sk_buff_head txq;
};
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
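
Note: the cpts rework above replaces a private delayed work with the PTP core's aux worker and parks TX skbs whose timestamp event has not arrived yet. The defer step reduces to: take a reference, stamp an expiry into skb->cb, queue, and kick the worker. A sketch using the same kernel APIs the driver uses, with an illustrative cb layout:

#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/ptp_clock_kernel.h>

struct defer_cb {
	unsigned long tmo;	/* illustrative; must fit in skb->cb */
};

static void defer_tx_skb(struct sk_buff_head *txq, struct sk_buff *skb,
			 struct ptp_clock *clock)
{
	struct defer_cb *cb = (struct defer_cb *)skb->cb;

	skb_get(skb);					/* hold a reference */
	cb->tmo = jiffies + msecs_to_jiffies(100);	/* expiry deadline */
	__skb_queue_tail(txq, skb);
	ptp_schedule_worker(clock, 0);			/* poll the FIFO soon */
}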
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index d9db8a06afd2..cce9c9ed46aa 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
{
static int count;
- printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):",
+ printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
dev->name, status);
if (status & Int_IntPCI)
printk(" IntPCI");
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de8156c6b292..2bbda71818ad 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_GENEVE_ID]) {
__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
- if (vni >= GENEVE_VID_MASK)
+ if (vni >= GENEVE_N_VID)
return -ERANGE;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 1542e837fdfa..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
gtp->dev = dev;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d6c25580f8dd..12cc64bfcff8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -765,7 +765,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
- refcount_t sc_offered;
+ atomic_t open_chn;
+ wait_queue_head_t subchan_open;
struct rndis_device *extension;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0a9167dd72fb..d18c3326a1f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
init_completion(&net_device->channel_init_wait);
+ init_waitqueue_head(&net_device->subchan_open);
return net_device;
}
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
struct netvsc_channel *nvchan = &net_device->chan_table[i];
nvchan->channel = device->channel;
+ u64_stats_init(&nvchan->tx_stats.syncp);
+ u64_stats_init(&nvchan->rx_stats.syncp);
}
/* Enable NAPI handler before init callbacks */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 63c98bbbc596..0d78727f1a14 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -315,14 +315,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
return slots_used;
}
-/* Estimate number of page buffers neede to transmit
- * Need at most 2 for RNDIS header plus skb body and fragments.
- */
-static unsigned int netvsc_get_slots(const struct sk_buff *skb)
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = frag->page_offset;
+
+ /* Skip unused frames from start of page */
+ offset &= ~PAGE_MASK;
+ pages += PFN_UP(offset + size);
+ }
+ return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
{
- return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
- + skb_shinfo(skb)->nr_frags
- + 2;
+ char *data = skb->data;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ int slots;
+ int frag_slots;
+
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ frag_slots = count_skb_frag_slots(skb);
+ return slots + frag_slots;
}
static u32 net_checksum_info(struct sk_buff *skb)
@@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
- /* We can only transmit MAX_PAGE_BUFFER_COUNT number
+ /* We will need at most two pages to describe the RNDIS
+ * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
* of pages in a single packet. If skb is scattered around
* more pages we try linearizing it.
*/
- num_data_pgs = netvsc_get_slots(skb);
+
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb))
goto no_memory;
- num_data_pgs = netvsc_get_slots(skb);
+ num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
++net_device_ctx->eth_stats.tx_too_big;
goto drop;
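
Note: netvsc_get_slots() now counts, per buffer, how many pages the byte range actually spans instead of assuming one slot per fragment; the "+ 2" the callers add reserves the worst case for the RNDIS header. The per-range arithmetic in isolation:

#include <linux/pfn.h>
#include <linux/mm.h>

/* Pages touched by @len bytes starting at page offset (@offset & ~PAGE_MASK). */
static int pages_spanned(unsigned long offset, unsigned long len)
{
	return PFN_UP((offset & ~PAGE_MASK) + len);
}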
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 85c00e1c52b6..d6308ffda53e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
else
netif_napi_del(&nvchan->napi);
- if (refcount_dec_and_test(&nvscdev->sc_offered))
- complete(&nvscdev->channel_init_wait);
+ atomic_inc(&nvscdev->open_chn);
+ wake_up(&nvscdev->subchan_open);
}
int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1;
net_device->num_chn = 1;
- refcount_set(&net_device->sc_offered, 0);
-
net_device->extension = rndis_device;
rndis_device->ndev = net;
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
net_device->num_chn);
+ atomic_set(&net_device->open_chn, 1);
num_rss_qs = net_device->num_chn - 1;
if (num_rss_qs == 0)
return 0;
- refcount_set(&net_device->sc_offered, num_rss_qs);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto out;
+ wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
ret = -ENODEV;
goto out;
}
- wait_for_completion(&net_device->channel_init_wait);
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+ /* wait for all sub channels to open */
+ wait_event(net_device->subchan_open,
+ atomic_read(&net_device->open_chn) == net_device->num_chn);
+
/* ignore failures from setting RSS parameters, still have channels */
rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
net_device->num_chn);
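
Note: the rndis_filter change swaps the refcount/completion pair for an explicit gate — each subchannel open bumps an atomic counter and wakes a waitqueue, and device add sleeps until the counter reaches num_chn (the primary channel counts as 1). The gate by itself, with hypothetical globals:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t open_chn = ATOMIC_INIT(1);	/* primary channel is open */
static DECLARE_WAIT_QUEUE_HEAD(subchan_open);

static void on_subchannel_open(void)
{
	atomic_inc(&open_chn);
	wake_up(&subchan_open);		/* re-test the wait condition */
}

static void wait_for_all_channels(int num_chn)
{
	wait_event(subchan_open, atomic_read(&open_chn) == num_chn);
}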
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f37e3c1fd4e7..8dab74a81303 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
netdev_lockdep_set_classes(dev);
- ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+ ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 6f6ed75b63c9..765de3bedb88 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
{
struct usb_device *dev = mcs->usbdev;
- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, 2,
- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+ void *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+ memcpy(val, dmabuf, sizeof(__u16));
+ kfree(dmabuf);
return ret;
}
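
Note: the mcs7780 fix exists because usb_control_msg() data buffers are DMA-mapped, so they must come from the heap, never from a caller-supplied stack (or static) location. The safe shape of a 16-bit register read, sketched generically (the vendor request values are placeholders):

#include <linux/slab.h>
#include <linux/usb.h>

static int read_reg16(struct usb_device *udev, u8 req, u16 index, u16 *val)
{
	__le16 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-able */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, sizeof(*buf), 1000 /* ms */);
	if (ret >= 0)
		*val = le16_to_cpu(*buf);
	kfree(buf);
	return ret;
}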
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2dda72004a7d..928fd892f167 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE
help
MDIO devices and driver infrastructure code.
-if MDIO_DEVICE
+config MDIO_BUS
+ tristate
+ default m if PHYLIB=m
+ default MDIO_DEVICE
+ help
+ This internal symbol is used for link time dependencies and it
+ reflects whether the mdio_bus/mdio_device code is built as a
+ loadable module or built-in.
+
+if MDIO_BUS
config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
@@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC
config MDIO_BITBANG
tristate "Bitbanged MDIO buses"
- depends on !(MDIO_DEVICE=y && PHYLIB=m)
help
This module implements the MDIO bus protocol in software,
for use by low level drivers that export the ability to
@@ -127,7 +135,6 @@ config MDIO_THUNDER
tristate "ThunderX SOCs MDIO buses"
depends on 64BIT
depends on PCI
- depends on !(MDIO_DEVICE=y && PHYLIB=m)
select MDIO_CAVIUM
help
This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d0626bf5c540..5068c582d502 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
+
+ /* Now we can run the state machine synchronously */
+ phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index bd4303944e44..a404552555d4 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- read_lock(&pch->upl);
ppp = pch->ppp;
if (ppp)
- ppp_xmit_process(ppp);
- read_unlock(&pch->upl);
+ __ppp_xmit_process(ppp);
}
}
static void ppp_channel_push(struct channel *pch)
{
- local_bh_disable();
-
- __ppp_channel_push(pch);
-
- local_bh_enable();
+ read_lock_bh(&pch->upl);
+ if (pch->ppp) {
+ (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+ __ppp_channel_push(pch);
+ (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+ } else {
+ __ppp_channel_push(pch);
+ }
+ read_unlock_bh(&pch->upl);
}
/*
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index eac499c58aa7..6dde9a0cfe76 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock)
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
- synchronize_rcu();
}
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
@@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock)
po = pppox_sk(sk);
del_chan(po);
+ synchronize_rcu();
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_DEAD;
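
Note: moving synchronize_rcu() from del_chan() into pptp_release() keeps the mandatory RCU retire order intact while taking the grace-period wait out of the hot path — unpublish the pointer, wait for pre-existing readers to drain, then free. Schematically:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj { int data; };
static struct obj __rcu *slot;

/* Caller holds whatever lock serializes updates to @slot. */
static void retire_obj(void)
{
	struct obj *old = rcu_dereference_protected(slot, 1);

	RCU_INIT_POINTER(slot, NULL);	/* 1: unpublish */
	synchronize_rcu();		/* 2: wait out existing readers */
	kfree(old);			/* 3: no reader can still see it */
}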
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 464570409796..ae53e899259f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
static int __set_port_dev_addr(struct net_device *port_dev,
const unsigned char *dev_addr)
{
- struct sockaddr addr;
+ struct sockaddr_storage addr;
- memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
- addr.sa_family = port_dev->type;
- return dev_set_mac_address(port_dev, &addr);
+ memcpy(addr.__data, dev_addr, port_dev->addr_len);
+ addr.ss_family = port_dev->type;
+ return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
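
Note: __set_port_dev_addr() moves to struct sockaddr_storage because sockaddr's sa_data is only 14 bytes, which some link types' addr_len exceeds; sockaddr_storage is sized and aligned for any family, and its __data member is what the hunk above fills. The shape of the fix:

#include <linux/socket.h>
#include <linux/string.h>

static void fill_lladdr(struct sockaddr_storage *ss, unsigned short type,
			const unsigned char *addr, unsigned char addr_len)
{
	memset(ss, 0, sizeof(*ss));
	ss->ss_family = type;
	memcpy(ss->__data, addr, addr_len);	/* room beyond sa_data's 14 bytes */
}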
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3d4c24572ecd..32ad87345f57 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2598,8 +2598,16 @@ static int __init tun_init(void)
goto err_misc;
}
- register_netdevice_notifier(&tun_notifier_block);
+ ret = register_netdevice_notifier(&tun_notifier_block);
+ if (ret) {
+ pr_err("Can't register netdevice notifier\n");
+ goto err_notifier;
+ }
+
return 0;
+
+err_notifier:
+ misc_deregister(&tun_miscdev);
err_misc:
rtnl_link_unregister(&tun_link_ops);
err_linkops:
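
Note: the tun_init() fix completes the standard init unwind ladder — each registration that can fail jumps to a label that undoes everything before it, in reverse order. Generic shape, with stub hooks standing in for tun's three registrations:

static int register_a(void) { return 0; }	/* e.g. rtnl_link_register() */
static int register_b(void) { return 0; }	/* e.g. misc_register() */
static int register_c(void) { return 0; }	/* e.g. the notifier */
static void unregister_a(void) { }
static void unregister_b(void) { }

static int my_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto out;
	ret = register_b();
	if (ret)
		goto err_a;
	ret = register_c();
	if (ret)
		goto err_b;
	return 0;

err_b:
	unregister_b();
err_a:
	unregister_a();
out:
	return ret;
}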
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index d1092421aaa7..9a4171b90947 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx);
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
+void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7847436c441e..522d2900cd1d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+ /* Reset the variables that have a lifetime outside of
+ * asix_rx_fixup_internal() so that future processing starts from a
+ * known set of initial conditions.
+ */
+
+ if (rx->ax_skb) {
+ /* Discard any incomplete Ethernet frame in the netdev buffer */
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+
+ /* Assume the Data header 32-bit word is at the start of the current
+ * or next URB socket buffer so reset all the state variables.
+ */
+ rx->remaining = 0;
+ rx->split_head = false;
+ rx->header = 0;
+}
+
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx)
{
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
rx->remaining);
- if (rx->ax_skb) {
- kfree_skb(rx->ax_skb);
- rx->ax_skb = NULL;
- /* Discard the incomplete netdev Ethernet frame
- * and assume the Data header is at the start of
- * the current URB socket buffer.
- */
- }
- rx->remaining = 0;
+ reset_asix_rx_fixup_info(rx);
}
}
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
rx->header, offset);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (rx->ax_skb) {
skb_put_data(rx->ax_skb, skb->data + offset,
copy_length);
- if (!rx->remaining)
+ if (!rx->remaining) {
usbnet_skb_return(dev, rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
}
offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (skb->len != offset) {
netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
skb->len, offset);
+ reset_asix_rx_fixup_info(rx);
return 0;
}
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
return asix_rx_fixup_internal(dev, skb, rx);
}
+void asix_rx_fixup_common_free(struct asix_common_private *dp)
+{
+ struct asix_rx_fixup_info *rx;
+
+ if (!dp)
+ return;
+
+ rx = &dp->rx_fixup_info;
+
+ if (rx->ax_skb) {
+ kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ }
+}
+
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index a3aa0a27dfe5..b2ff88e69a81 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ asix_rx_fixup_common_free(dev->driver_priv);
kfree(dev->driver_priv);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5833f7e2a127..b99a7fb09f8e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* Init LTM */
lan78xx_init_ltm(dev);
- dev->net->hard_header_len += TX_OVERHEAD;
- dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
if (dev->udev->speed == USB_SPEED_SUPER) {
buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
return ret;
}
+ dev->net->hard_header_len += TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
/* Init all registers */
ret = lan78xx_reset(dev);
- lan78xx_mdio_init(dev);
+ ret = lan78xx_mdio_init(dev);
dev->net->flags |= IFF_MULTICAST;
pdata->wol = WAKE_MAGIC;
- return 0;
+ return ret;
}
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf);
udev = usb_get_dev(udev);
- ret = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct lan78xx_net));
if (!netdev) {
- dev_err(&intf->dev, "Error: OOM\n");
- goto out1;
+ dev_err(&intf->dev, "Error: OOM\n");
+ ret = -ENOMEM;
+ goto out1;
}
/* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out2;
+ goto out3;
}
usb_set_intfdata(intf, dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5894e3c9468f..8c3733608271 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
static void qmi_wwan_disconnect(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct qmi_wwan_state *info = (void *)&dev->data;
+ struct qmi_wwan_state *info;
struct list_head *iter;
struct net_device *ldev;
+ /* called twice if separate control and data intf */
+ if (!dev)
+ return;
+ info = (void *)&dev->data;
if (info->flags & QMI_WWAN_FLAG_MUX) {
if (!rtnl_trylock()) {
restart_syscall();
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 99a26a9efec1..98f17b05c68b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- ctx = (void *)(unsigned long)len;
get_page(alloc_frag->page);
alloc_frag->offset += len + headroom;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + headroom) {
/* To avoid internal fragmentation, if there is very likely not
* enough space for another buffer, add the remaining space to
- * the current buffer. This extra space is not included in
- * the truesize stored in ctx.
+ * the current buffer.
*/
len += hole;
alloc_frag->offset += hole;
}
sg_init_one(rq->sg, buf, len);
+ ctx = (void *)(unsigned long)len;
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -2743,9 +2742,9 @@ module_init(virtio_net_driver_init);
static __exit void virtio_net_driver_exit(void)
{
+ unregister_virtio_driver(&virtio_net_driver);
cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
cpuhp_remove_multi_state(virtionet_online);
- unregister_virtio_driver(&virtio_net_driver);
}
module_exit(virtio_net_driver_exit);
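
Note: the virtio_net exit reorder restores the usual invariant that teardown mirrors setup in reverse — the driver is unregistered first, so no device can race against removal of the CPU-hotplug states its callbacks depend on. As a rule of thumb, with stub undo hooks:

static void undo_a(void) { }	/* first thing initialized */
static void undo_b(void) { }
static void undo_c(void) { }	/* last thing initialized, e.g. registration */

/* If init ran a(), b(), c(), exit must run undo_c(), undo_b(), undo_a(). */
static void my_exit(void)
{
	undo_c();
	undo_b();
	undo_a();
}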
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 96aa7e6cf214..e17baac70f43 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
out:
skb_gro_remcsum_cleanup(skb, &grc);
+ skb->remcsum_offload = 0;
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 2153e8062b4c..5cc3a07dda9e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
/* Make sure there's enough writeable headroom */
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
- head_delta = drvr->hdrlen - skb_headroom(skb);
+ head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
brcmf_ifname(ifp), head_delta);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fbcbb4325936..f3556122c6ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
atomic_inc(&stats->pktcow_failed);
return -ENOMEM;
}
+ head_pad = 0;
}
skb_push(pkt, head_pad);
dat_buf = (u8 *)(pkt->data);
}
memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
- return 0;
+ return head_pad;
}
/**
@@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail;
}
- /* allocate scatter-gather table. sg support
- * will be disabled upon allocation failure.
- */
- brcmf_sdiod_sgtable_alloc(bus->sdiodev);
-
/* Query the F2 block size, set roundup accordingly */
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index adaa2f0097cc..fb40ddfced99 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
}
iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
- iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
/* process frames */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index 545d14b0bc92..f5c1127253cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
/* also account for the RFC 1042 header, of course */
offs += 6;
- return skb->len > offs + 2 &&
- *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
+ return skb->len <= offs + 2 ||
+ *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
}
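The rewritten predicate above is the De Morgan negation of the old one: iwl_trace_data() now returns true for frames that should be traced as data, i.e. everything except EAPOL frames. A standalone check of that equivalence, under this reading:

#include <assert.h>
#include <stdbool.h>

static bool old_pred(bool long_enough, bool is_pae)
{
	return long_enough && is_pae;	/* old: "is an EAPOL frame" */
}

static bool new_pred(bool long_enough, bool is_pae)
{
	return !long_enough || !is_pae;	/* new: "is not an EAPOL frame" */
}

int main(void)
{
	for (int a = 0; a < 2; a++)
		for (int b = 0; b < 2; b++)
			assert(new_pred(a, b) == !old_pred(a, b));
	return 0;
}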
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index bcde1ba0f1c8..c7b1e58e3384 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+ /*
+ * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
+	 * so that later code will, from now on, see that we're doing it.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
/* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index eaacfaf37206..ddd8719f27b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1090,6 +1090,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
* @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
* @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1101,6 +1102,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_HW_RESTART_REQUESTED,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4d1188b8736a..9c175d5e9d67 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1235,9 +1235,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
- NULL);
- } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
- &mvm->status)) {
+ NULL);
+ } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
IWL_ERR(mvm,
@@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
if (fw_error && mvm->fw_restart > 0)
mvm->fw_restart--;
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
ieee80211_restart_hw(mvm->hw);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 4df5f13fcdae..ab66b4394dfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -277,6 +277,18 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
/* Timer expired */
sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+ /*
+ * sta should be valid unless the following happens:
+ * The firmware asserts which triggers a reconfig flow, but
+ * the reconfig fails before we set the pointer to sta into
+ * the fw_id_to_mac_id pointer table. Mac80211 can't stop
+	 * A-MPDU and hence the timer continues to run. Then, the
+ * timer expires and sta is NULL.
+ */
+ if (!sta)
+ goto unlock;
+
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
sta->addr, ba_data->tid);
@@ -2015,7 +2027,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_MAX_TID_COUNT,
wdg_timeout);
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
mvm->probe_queue = queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_dev_queue = queue;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 92b3a55d0fbc..f95eec52508e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3150,7 +3150,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->d0i3_waitq);
if (trans_pcie->msix_enabled) {
- if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+ ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+ if (ret)
goto out_no_pci;
} else {
ret = iwl_pcie_alloc_ict(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index de50418adae5..034bdb4a0b06 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
+ if (!test_bit(i, trans_pcie->queue_used))
+ continue;
+
spin_lock_bh(&txq->lock);
if (txq->need_update) {
iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 2a7ad5ffe997..cd5dc6dcb19f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
return false;
}
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
-
bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index fb1ebb01133f..70723e67b7d7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2547,7 +2547,6 @@ struct bt_coexist_info {
struct rtl_btc_ops {
void (*btc_init_variables) (struct rtl_priv *rtlpriv);
void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
- void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3b77cfe5aa1e..37046ac2c441 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = cpu_to_le32(sizeof(*s));
+ c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
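The numd fix above follows from the NVMe spec: NUMD is a zero's-based count of dwords, not a byte count, so a payload of sizeof(*s) bytes must be encoded as (sizeof(*s) / 4) - 1. A standalone check of the arithmetic (the sizes are just examples):

#include <assert.h>

static unsigned int nvme_numd(unsigned int bytes)
{
	return (bytes >> 2) - 1;	/* zero's-based dword count */
}

int main(void)
{
	assert(nvme_numd(32) == 7);	/* 32 bytes = 8 dwords -> NUMD 7 */
	assert(nvme_numd(4096) == 1023);
	return 0;
}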
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc);
}
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
/*
* APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
* then don't do anything.
*/
if (!ctrl->apsta)
- return;
+ return 0;
if (ctrl->npss > 31) {
dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
- return;
+ return 0;
}
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
- return;
+ return 0;
if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
kfree(table);
+ return ret;
}
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* In fabrics we need to verify the cntlid matches the
* admin connect
*/
- if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
ret = -EINVAL;
+ goto out_free;
+ }
if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
+ goto out_free;
}
} else {
ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
- nvme_configure_apst(ctrl);
- nvme_configure_directives(ctrl);
+ ret = nvme_configure_apst(ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = nvme_configure_directives(ctrl);
+ if (ret < 0)
+ return ret;
ctrl->identified = true;
+ return 0;
+
+out_free:
+ kfree(id);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -1995,15 +2008,20 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
int serial_len = sizeof(ctrl->serial);
int model_len = sizeof(ctrl->model);
+ if (!uuid_is_null(&ns->uuid))
+ return sprintf(buf, "uuid.%pU\n", &ns->uuid);
+
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return sprintf(buf, "eui.%16phN\n", ns->nguid);
if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
return sprintf(buf, "eui.%8phN\n", ns->eui);
- while (ctrl->serial[serial_len - 1] == ' ')
+ while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+ ctrl->serial[serial_len - 1] == '\0'))
serial_len--;
- while (ctrl->model[model_len - 1] == ' ')
+ while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+ ctrl->model[model_len - 1] == '\0'))
model_len--;
return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
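The wwid_show() trimming fix above bounds both loops, so a serial or model field consisting entirely of spaces or NULs can no longer walk off the front of the buffer. A minimal userspace model:

#include <stdio.h>

static int trimmed_len(const char *s, int len)
{
	while (len > 0 && (s[len - 1] == ' ' || s[len - 1] == '\0'))
		len--;
	return len;
}

int main(void)
{
	char serial[8] = "AB12    ";	/* fixed-width, space padded */
	char blank[8] = "        ";	/* pathological all-padding case */

	printf("%d %d\n", trimmed_len(serial, 8), trimmed_len(blank, 8));
	return 0;	/* prints "4 0" instead of underflowing */
}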
@@ -2709,7 +2727,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
mutex_lock(&ctrl->namespaces_mutex);
/* Forcibly unquiesce queues to avoid blocking dispatch */
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ if (ctrl->admin_q)
+ blk_mq_unquiesce_queue(ctrl->admin_q);
list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d666ada39a9b..5c2a08ef08ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
* the target device is present
*/
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
- return BLK_STS_IOERR;
+ goto busy;
if (!nvme_fc_ctrl_get(ctrl))
return BLK_STS_IOERR;
@@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue->lldd_handle, &op->fcp_req);
if (ret) {
- if (op->rq) /* normal request */
+ if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
- /* else - aen. no cleanup needed */
nvme_fc_ctrl_put(ctrl);
- if (ret != -EBUSY)
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
+ ret != -EBUSY)
return BLK_STS_IOERR;
- if (op->rq)
- blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
- return BLK_STS_RESOURCE;
+ goto busy;
}
return BLK_STS_OK;
+
+busy:
+ if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
+ blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+
+ return BLK_STS_RESOURCE;
}
static blk_status_t
@@ -2802,66 +2805,70 @@ out_fail:
return ERR_PTR(ret);
}
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * Since kernel parsers need a 0x prefix to determine the number base,
+ * the string to parse is always built with a 0x prefix before the name
+ * strings are parsed.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+	/* validate if the string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
+
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
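A userspace sketch of the normalization the parser above performs: the WWN hex digits are copied behind a literal "0x" so a base-autodetecting integer parser reads them as hexadecimal. The 16-digit width mirrors NVME_FC_TRADDR_HEXNAMELEN; the sample WWN is made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long long parse_wwn(const char *hex16)
{
	char name[2 + 16 + 1] = "0x";

	memcpy(&name[2], hex16, 16);
	name[18] = '\0';
	return strtoull(name, NULL, 0);	/* "0x" prefix selects base 16 */
}

int main(void)
{
	printf("nn=0x%llx\n", parse_wwn("20000090fac7893f"));
	return 0;
}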
static struct nvme_ctrl *
@@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
unsigned long flags;
int ret;
- ret = nvme_fc_parse_address(&raddr, opts->traddr);
+ ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
if (ret || !raddr.nn || !raddr.pn)
return ERR_PTR(-EINVAL);
- ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+ ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
if (ret || !laddr.nn || !laddr.pn)
return ERR_PTR(-EINVAL);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8569ee771269..74a124a06264 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
if (dev->cmb) {
iounmap(dev->cmb);
dev->cmb = NULL;
- if (dev->cmbsz) {
- sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL);
- dev->cmbsz = 0;
- }
+ sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL);
+ dev->cmbsz = 0;
}
}
@@ -1619,7 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
- u32 chunk_size, max_entries;
+ u32 chunk_size, max_entries, len;
int i = 0;
void **bufs;
u64 size = 0, tmp;
@@ -1638,10 +1636,10 @@ retry:
if (!bufs)
goto out_free_descs;
- for (size = 0; size < preferred; size += chunk_size) {
- u32 len = min_t(u64, chunk_size, preferred - size);
+ for (size = 0; size < preferred; size += len) {
dma_addr_t dma_addr;
+ len = min_t(u64, chunk_size, preferred - size);
bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
if (!bufs[i])
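The loop rewrite above matters for the final, short chunk: size now advances by the length actually allocated rather than by the full chunk_size, so the running total matches the descriptor list. A standalone model of the accounting:

#include <stdio.h>

int main(void)
{
	unsigned long long preferred = 10, chunk_size = 4, size, len;
	int chunks = 0;

	for (size = 0; size < preferred; size += len) {
		len = chunk_size < preferred - size ?
			chunk_size : preferred - size;
		chunks++;	/* allocation of `len` bytes elided */
	}
	printf("%d chunks, %llu bytes\n", chunks, size);	/* 3, 10 */
	return 0;
}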
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
/*
* CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Note that we add the
- * CMB attribute to the nvme_ctrl kobj which removes the need to remove
- * it on exit. Since nvme_dev_attrs_group has no name we can pass
- * NULL as final argument to sysfs_add_file_to_group.
+ * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+ * has no name we can pass NULL as final argument to
+ * sysfs_add_file_to_group.
*/
if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
-
- if (dev->cmbsz) {
+ if (dev->cmb) {
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL))
dev_warn(dev->ctrl.device,
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index d5801c150b1c..1b7f2520a20d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
struct kref ref;
};
+struct nvmet_fc_defer_fcp_req {
+ struct list_head req_list;
+ struct nvmefc_tgt_fcp_req *fcp_req;
+};
+
struct nvmet_fc_tgt_queue {
bool ninetypercent;
u16 qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
+ struct list_head pending_cmd_list;
+ struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
} __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
static struct nvmet_fc_fcp_iod *fod;
- unsigned long flags;
- spin_lock_irqsave(&queue->qlock, flags);
+ lockdep_assert_held(&queue->qlock);
+
fod = list_first_entry_or_null(&queue->fod_list,
struct nvmet_fc_fcp_iod, fcp_list);
if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
* will "inherit" that reference.
*/
}
- spin_unlock_irqrestore(&queue->qlock, flags);
return fod;
}
static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+ /*
+	 * Put all admin cmds on hw queue id 0. All io commands go to
+	 * their respective hw queues on a modulo basis.
+ */
+ fcpreq->hwqid = queue->qid ?
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+ if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+ queue_work_on(queue->cpu, queue->work_q, &fod->work);
+ else
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
struct nvmet_fc_fcp_iod *fod)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
unsigned long flags;
fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
fcpreq->nvmet_fc_private = NULL;
- spin_lock_irqsave(&queue->qlock, flags);
- list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
fod->active = false;
fod->abort = false;
fod->aborted = false;
fod->writedataactive = false;
fod->fcpreq = NULL;
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp) {
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /* Release reference taken at queue lookup and fod allocation */
+ nvmet_fc_tgt_q_put(queue);
+ return;
+ }
+
+ /* Re-use the fod for the next pending cmd that was deferred */
+ list_del(&deferfcp->req_list);
+
+ fcpreq = deferfcp->fcp_req;
+
+ /* deferfcp can be reused for another IO at a later date */
+ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
spin_unlock_irqrestore(&queue->qlock, flags);
+ /* Save NVME CMD IO in fod */
+ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+ /* Setup new fcpreq to be processed */
+ fcpreq->rspaddr = NULL;
+ fcpreq->rsplen = 0;
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+ fod->active = true;
+
+ /* inform LLDD IO is now being processed */
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+ /* Submit deferred IO for processing */
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
/*
- * release the reference taken at queue lookup and fod allocation
+	 * Leave the queue lookup reference that was taken when
+	 * the fod was originally allocated.
*/
- nvmet_fc_tgt_q_put(queue);
-
- tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
}
static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
queue->port = assoc->tgtport->port;
queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
INIT_LIST_HEAD(&queue->fod_list);
+ INIT_LIST_HEAD(&queue->avail_defer_list);
+ INIT_LIST_HEAD(&queue->pending_cmd_list);
atomic_set(&queue->connected, 0);
atomic_set(&queue->sqtail, 0);
atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
struct nvmet_fc_fcp_iod *fod = queue->fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
unsigned long flags;
int i, writedataactive;
bool disconnect;
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
}
}
}
+
+ /* Cleanup defer'ed IOs in queue */
+ list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+ list_del(&deferfcp->req_list);
+ kfree(deferfcp);
+ }
+
+ for (;;) {
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp)
+ break;
+
+ list_del(&deferfcp->req_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ kfree(deferfcp);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
spin_unlock_irqrestore(&queue->qlock, flags);
flush_workqueue(queue->work_q);
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
* Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
* layer for processing.
*
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing. As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
*
- * If this routine returns error, the lldd should abort the exchange.
+ * However, in some circumstances, due to the packetized nature of FC
+ * and the API of the FC LLDD (which may issue a hw command to send the
+ * response but not see the hw completion for that command before a new
+ * command is asynchronously received and upcalled to the nvmet_fc
+ * layer), it's possible for a command to be received before the LLDD
+ * and nvmet_fc have recycled the job structure. This gives the
+ * appearance of more commands received than fit in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated,
+ * the LLDD request and CMD IU buffer information are remembered, and
+ * the routine returns a -EOVERFLOW status. Subsequently, when a queue
+ * job structure is freed, it is immediately reallocated for anything on
+ * the pending request list. The LLDD's defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
+ *
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
*
* @target_port: pointer to the (registered) target port the FCP CMD IU
* was received on.
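A hedged sketch of the LLDD side of the -EOVERFLOW contract described above; lldd_release_iu(), lldd_hold_iu() and lldd_abort_exchange() are hypothetical driver helpers, not kernel APIs, and error handling is elided:

static void lldd_handle_cmd(struct nvmet_fc_target_port *tport,
			    struct nvmefc_tgt_fcp_req *fcpreq,
			    void *cmdiu, u32 len)
{
	int ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiu, len);

	if (ret == 0)
		lldd_release_iu(cmdiu);		/* may reuse immediately */
	else if (ret == -EOVERFLOW)
		lldd_hold_iu(cmdiu);		/* keep until defer_rcv() */
	else
		lldd_abort_exchange(fcpreq);	/* transport rejected it */
}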
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
struct nvmet_fc_tgt_queue *queue;
struct nvmet_fc_fcp_iod *fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
+ unsigned long flags;
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
* when the fod is freed.
*/
+ spin_lock_irqsave(&queue->qlock, flags);
+
fod = nvmet_fc_alloc_fcp_iod(queue);
- if (!fod) {
+ if (fod) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+
+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+ return 0;
+ }
+
+ if (!tgtport->ops->defer_rcv) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
/* release the queue lookup reference */
nvmet_fc_tgt_q_put(queue);
return -ENOENT;
}
- fcpreq->nvmet_fc_private = fod;
- fod->fcpreq = fcpreq;
- /*
- * put all admin cmds on hw queue id 0. All io commands go to
- * the respective hw queue based on a modulo basis
- */
- fcpreq->hwqid = queue->qid ?
- ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
- memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+ deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (deferfcp) {
+ /* Just re-use one that was previously allocated */
+ list_del(&deferfcp->req_list);
+ } else {
+ spin_unlock_irqrestore(&queue->qlock, flags);
- if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
- queue_work_on(queue->cpu, queue->work_q, &fod->work);
- else
- nvmet_fc_handle_fcp_rqst(tgtport, fod);
+ /* Now we need to dynamically allocate one */
+ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+ if (!deferfcp) {
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
- return 0;
+ /* For now, use rspaddr / rsplen to save payload information */
+ fcpreq->rspaddr = cmdiubuf;
+ fcpreq->rsplen = cmdiubuf_len;
+ deferfcp->fcp_req = fcpreq;
+
+ /* defer processing till a fod becomes available */
+ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+ /* NOTE: the queue lookup reference is still valid */
+
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
@@ -2293,66 +2449,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * Since kernel parsers need a 0x prefix to determine the number base,
+ * the string to parse is always built with a 0x prefix before the name
+ * strings are parsed.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+	/* validate if the string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
+
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
static int
@@ -2370,7 +2530,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
/* map the traddr address info to a target port */
- ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
+ ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
+ sizeof(port->disc_addr.traddr));
if (ret)
return ret;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ce72aa65425..ab21c846eb27 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (!of_irq_to_resource(dev, i, res))
+ if (of_irq_to_resource(dev, i, res) <= 0)
break;
return i;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index eda50b4be934..067f9fab7b77 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -708,6 +708,15 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
{
unsigned int depth;
+ if (!node)
+ return NULL;
+
+ /*
+ * Preserve usecount for passed in node as of_get_next_parent()
+ * will do of_node_put() on it.
+ */
+ of_node_get(node);
+
/* Walk 3 levels up only if there is 'ports' node. */
for (depth = 3; depth && node; depth--) {
node = of_get_next_parent(node);
@@ -728,12 +737,16 @@ EXPORT_SYMBOL(of_graph_get_port_parent);
struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node)
{
- struct device_node *np;
+ struct device_node *np, *pp;
/* Get remote endpoint node. */
np = of_graph_get_remote_endpoint(node);
- return of_graph_get_port_parent(np);
+ pp = of_graph_get_port_parent(np);
+
+ of_node_put(np);
+
+ return pp;
}
EXPORT_SYMBOL(of_graph_get_remote_port_parent);
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 055f83fddc18..b1ff46fe4547 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -333,11 +333,11 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
/* Update the symlink to the real device */
sysfs_remove_link(&entry->kobj, "device");
+ write_unlock(&entry->rw_lock);
+
ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
WARN_ON(ret);
- write_unlock(&entry->rw_lock);
-
printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
entry->name, buf);
@@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = {
NULL,
};
-static struct attribute_group pdcs_attr_group = {
+static const struct attribute_group pdcs_attr_group = {
.attrs = pdcs_subsys_attrs,
};
@@ -998,6 +998,7 @@ pdcs_register_pathentries(void)
/* kobject is now registered */
write_lock(&entry->rw_lock);
entry->ready = 2;
+ write_unlock(&entry->rw_lock);
/* Add a nice symlink to the real device */
if (entry->dev) {
@@ -1005,7 +1006,6 @@ pdcs_register_pathentries(void)
WARN_ON(err);
}
- write_unlock(&entry->rw_lock);
kobject_uevent(&entry->kobj, KOBJ_ADD);
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index af0cc3456dc1..b4b7eab29400 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4260,6 +4260,41 @@ int pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_reset_function);
/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset. It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_probe_reset_function(dev);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ rc = __pci_reset_function_locked(dev);
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
+/**
* pci_try_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
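A hedged usage sketch for the new pci_reset_function_locked() export above: the caller must already hold the PCI device lock (taken here via device_lock(); whether a given caller already holds it depends on context), which is the behavioral difference from pci_reset_function():

#include <linux/device.h>
#include <linux/pci.h>

/* Illustrative only: reset a function while explicitly holding the
 * device lock, as pci_reset_function_locked() requires. */
static int my_locked_reset(struct pci_dev *pdev)
{
	int rc;

	device_lock(&pdev->dev);
	rc = pci_reset_function_locked(pdev);
	device_unlock(&pdev->dev);

	return rc;
}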
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index dc459eb1246b..1c5e0f333779 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
if (irq != other_irq) {
pr_warn("mismatched PPIs detected.\n");
err = -EINVAL;
+ goto err_out;
}
} else {
- err = request_irq(irq, handler,
- IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+ unsigned long irq_flags;
+
+ err = irq_force_affinity(irq, cpumask_of(cpu));
+
+ if (err && num_possible_cpus() > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ goto err_out;
+ }
+
+ if (platdata && platdata->irq_flags) {
+ irq_flags = platdata->irq_flags;
+ } else {
+ irq_flags = IRQF_PERCPU |
+ IRQF_NOBALANCING |
+ IRQF_NO_THREAD;
+ }
+
+ err = request_irq(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
+ if (err)
+ goto err_out;
cpumask_set_cpu(cpu, &armpmu->active_irqs);
-
return 0;
+
+err_out:
+ pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
+ return err;
}
int armpmu_request_irqs(struct arm_pmu *armpmu)
@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
enable_percpu_irq(irq, IRQ_TYPE_NONE);
return 0;
}
-
- if (irq_force_affinity(irq, cpumask_of(cpu)) &&
- num_possible_cpus() > 1) {
- pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- }
}
return 0;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 69255f53057a..4eafa7a42e52 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
}
if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
- pr_warn("no interrupt-affinity property for %s, guessing.\n",
- of_node_full_name(pdev->dev.of_node));
+ pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
+ pdev->dev.of_node);
}
/*
@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+ pr_info("%pOF: failed to probe PMU!\n", node);
goto out_free;
}
@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
out_free_irqs:
armpmu_free_irqs(pmu);
out_free:
- pr_info("%s: failed to register PMU devices!\n",
- of_node_full_name(node));
+ pr_info("%pOF: failed to register PMU devices!\n", node);
armpmu_free(pmu);
return ret;
}
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index c259848228b4..b242cce10468 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
}
if ((event != event->group_leader) &&
+ !is_software_event(event->group_leader) &&
(L2_EVT_GROUP(event->group_leader->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
list_for_each_entry(sibling, &event->group_leader->sibling_list,
group_entry) {
if ((sibling != event) &&
+ !is_software_event(sibling) &&
(L2_EVT_GROUP(sibling->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 37371b89b14f..64fc59c3ae6d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3
tristate "Broadcom Northstar USB 3.0 PHY Driver"
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_IOMEM && OF
+ depends on MDIO_BUS
select GENERIC_PHY
- select MDIO_DEVICE
help
Enable this to support Broadcom USB 3.0 PHY connected to the USB
controller on Northstar family.
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 20f1b4493994..04e929fd0ffe 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1548,6 +1548,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
},
},
{
+ .ident = "HP Chromebook 11 G5 (Setzer)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+ },
+ },
+ {
.ident = "Acer Chromebook R11 (Cyan)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4d4ef42a39b5..86c4b3fab7b0 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
static const unsigned int mrfld_pwm0_pins[] = { 144 };
static const unsigned int mrfld_pwm1_pins[] = { 145 };
static const unsigned int mrfld_pwm2_pins[] = { 132 };
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f024e25787fc..0c6d7812d6fd 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -37,7 +37,7 @@
#define IRQ_STATUS 0x10
#define IRQ_WKUP 0x18
-#define NB_FUNCS 2
+#define NB_FUNCS 3
#define GPIO_PER_REG 32
/**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
.funcs = {_func1, "gpio"} \
}
+#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
+ { \
+ .name = _name, \
+ .start_pin = _start, \
+ .npins = _nr, \
+ .reg_mask = _mask, \
+ .val = {_v1, _v2, _v3}, \
+ .funcs = {_f1, _f2, "gpio"} \
+ }
+
#define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
_f1, _f2) \
{ \
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
- PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+ PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
- PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"),
+ PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+ "mii", "mii_err"),
};
const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
};
const struct armada_37xx_pin_data armada_37xx_pin_sb = {
- .nr_pins = 29,
+ .nr_pins = 30,
.name = "GPIO2",
.groups = armada_37xx_sb_groups,
.ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
{
int f;
- for (f = 0; f < NB_FUNCS; f++)
+ for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
if (!strcmp(grp->funcs[f], func))
return f;
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
for (j = 0; j < grp->extra_npins; j++)
grp->pins[i+j] = grp->extra_pin + j;
- for (f = 0; f < NB_FUNCS; f++) {
+ for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
int ret;
/* check for unique functions and count groups */
ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
struct armada_37xx_pin_group *gp = &info->groups[g];
int f;
- for (f = 0; f < NB_FUNCS; f++) {
+ for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
if (strcmp(gp->funcs[f], name) == 0) {
*groups = gp->name;
groups++;
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index 3b8026fca057..7e1fe39a56a5 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -6,29 +6,30 @@ config PINCTRL_STM32
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
+ select IRQ_DOMAIN_HIERARCHY
select MFD_SYSCON
config PINCTRL_STM32F429
bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429
- depends on OF && IRQ_DOMAIN_HIERARCHY
+ depends on OF
default MACH_STM32F429
select PINCTRL_STM32
config PINCTRL_STM32F469
bool "STMicroelectronics STM32F469 pin control" if COMPILE_TEST && !MACH_STM32F469
- depends on OF && IRQ_DOMAIN_HIERARCHY
+ depends on OF
default MACH_STM32F469
select PINCTRL_STM32
config PINCTRL_STM32F746
bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746
- depends on OF && IRQ_DOMAIN_HIERARCHY
+ depends on OF
default MACH_STM32F746
select PINCTRL_STM32
config PINCTRL_STM32H743
bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743
- depends on OF && IRQ_DOMAIN_HIERARCHY
+ depends on OF
default MACH_STM32H743
select PINCTRL_STM32
endif
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 159580c04b14..47a392bc73c8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */
PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
+ SUNXI_FUNCTION(0x5, "sim"), /* DET */
SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index a433a306a2d0..c75e094b2d90 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {184, 185};
static const int usb2_muxvals[] = {0, 0};
-static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_pins[] = {187, 188};
static const int usb3_muxvals[] = {0, 0};
static const unsigned port_range0_pins[] = {
300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 787e3967bd5c..f828ee340a98 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
struct zx_pinctrl_soc_info *info = zpctl->info;
const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
struct zx_pin_data *data = pindesc->drv_data;
- struct zx_mux_desc *mux = data->muxes;
- u32 mask = (1 << data->width) - 1;
- u32 offset = data->offset;
- u32 bitpos = data->bitpos;
+ struct zx_mux_desc *mux;
+ u32 mask, offset, bitpos;
struct function_desc *func;
unsigned long flags;
u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
if (!data)
return -EINVAL;
+ mux = data->muxes;
+ mask = (1 << data->width) - 1;
+ offset = data->offset;
+ bitpos = data->bitpos;
+
func = pinmux_generic_get_function(pctldev, func_selector);
if (!func)
return -EINVAL;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b04860703740..80b87954f6dd 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -675,6 +675,7 @@ config PEAQ_WMI
tristate "PEAQ 2-in-1 WMI hotkey driver"
depends on ACPI_WMI
depends on INPUT
+ select INPUT_POLLDEV
help
Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index f8978464df31..dad8f4afa17c 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -626,7 +626,7 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
* WMI Interface Version 8 4 <version>
* WMI buffer length 12 4 4096
*/
-static int __init dell_wmi_check_descriptor_buffer(void)
+static int dell_wmi_check_descriptor_buffer(void)
{
struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
@@ -717,9 +717,15 @@ static int dell_wmi_events_set_enabled(bool enable)
static int dell_wmi_probe(struct wmi_device *wdev)
{
+ int err;
+
struct dell_wmi_priv *priv = devm_kzalloc(
&wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
+ err = dell_wmi_check_descriptor_buffer();
+ if (err)
+ return err;
+
dev_set_drvdata(&wdev->dev, priv);
return dell_wmi_input_setup(wdev);
@@ -749,10 +755,6 @@ static int __init dell_wmi_init(void)
{
int err;
- err = dell_wmi_check_descriptor_buffer();
- if (err)
- return err;
-
dmi_check_system(dell_wmi_smbios_list);
if (wmi_requires_smbios_request) {
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 61f106377661..480926786cb8 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -36,8 +36,8 @@ static const struct acpi_device_id intel_vbtn_ids[] = {
/* In theory, these are HID usages. */
static const struct key_entry intel_vbtn_keymap[] = {
- { KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */
- { KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_KEY, 0xC0, { KEY_POWER } }, /* power key press */
+ { KE_IGNORE, 0xC1, { KEY_POWER } }, /* power key release */
{ KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */
{ KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */
{ KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 1a764e311e11..e32ba575e8d9 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1252,12 +1252,12 @@ static int __init acpi_wmi_init(void)
return 0;
-err_unreg_class:
- class_unregister(&wmi_bus_class);
-
err_unreg_bus:
bus_unregister(&wmi_bus_type);
+err_unreg_class:
+ class_unregister(&wmi_bus_class);
+
return error;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b77435783ef3..7eacc1c4b3b1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
kfree(ptp);
}
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+ struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+ aux_work.work);
+ struct ptp_clock_info *info = ptp->info;
+ long delay;
+
+ delay = info->do_aux_work(info);
+
+ if (delay >= 0)
+ kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
/* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq);
+ if (ptp->info->do_aux_work) {
+ char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+ kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+ ptp->kworker = kthread_create_worker(0, worker_name ?
+ worker_name : info->name);
+ kfree(worker_name);
+ if (IS_ERR(ptp->kworker)) {
+ err = PTR_ERR(ptp->kworker);
+ pr_err("failed to create ptp aux_worker %d\n", err);
+ goto kworker_err;
+ }
+ }
+
err = ptp_populate_pin_groups(ptp);
if (err)
goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ if (ptp->kworker) {
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+ kthread_destroy_worker(ptp->kworker);
+ }
+
/* Release the clock's resources. */
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
}
EXPORT_SYMBOL(ptp_find_pin);
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+ return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
/* module operations */
static void __exit ptp_exit(void)
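A sketch of how a PHC driver might use the aux worker plumbing added above: do_aux_work() runs in kthread context and returns the delay in jiffies until its next invocation (or a negative value to stop), and ptp_schedule_worker() kicks or reschedules it. The my_phc_* names are hypothetical:

static long my_phc_do_aux_work(struct ptp_clock_info *info)
{
	/* e.g. drain hardware timestamp FIFOs here */
	return msecs_to_jiffies(100);	/* run again in ~100 ms */
}

/* In the driver's ptp_clock_info, set .do_aux_work = my_phc_do_aux_work;
 * once ptp_clock_register() succeeds, start the worker with:
 *
 *	ptp_schedule_worker(my_phc->ptp_clock, 0);
 */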
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index d95888974d0c..b86f1bfecd6f 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/posix-clock.h>
#include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
struct attribute_group pin_attr_group;
/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
const struct attribute_group *pin_attr_groups[2];
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work aux_work;
};
/*
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 7e0d4f724dda..432fc40990bd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
chpid.id = crw0->rsid;
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
+ case CRW_ERC_INIT:
if (!chp_is_registered(chpid))
chp_new(chpid);
chsc_chp_online(chpid);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..d42e758518ed 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rtable *rt = (struct rtable *) dst;
__be32 *pkey = &ip_hdr(skb)->daddr;
- if (rt->rt_gateway)
+ if (rt && rt->rt_gateway)
pkey = &rt->rt_gateway;
/* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rt6_info *rt = (struct rt6_info *) dst;
struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
- if (!ipv6_addr_any(&rt->rt6i_gateway))
+ if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
pkey = &rt->rt6i_gateway;
/* IPv6 */
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 04efed171c88..f32765d3cbd8 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -212,8 +212,8 @@ static int d7s_probe(struct platform_device *op)
writeb(regs, p->regs);
- printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n",
- op->dev.of_node->full_name,
+ printk(KERN_INFO PFX "7-Segment Display%pOF at [%s:0x%llx] %s\n",
+ op->dev.of_node,
(regs & D7S_FLIP) ? " (FLIPPED)" : "",
op->resource[0].start,
sol_compat ? "in sol_compat mode" : "");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 216f923161d1..a610b8d3d11f 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -181,8 +181,8 @@ static int flash_probe(struct platform_device *op)
}
flash.busy = 0;
- printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
- op->dev.of_node->full_name,
+ printk(KERN_INFO "%pOF: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
+ op->dev.of_node,
flash.read_base, flash.read_size,
flash.write_base, flash.write_size);
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 57696fc0b482..0a5013350acd 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -379,8 +379,8 @@ static int uctrl_probe(struct platform_device *op)
}
sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr);
- printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n",
- op->dev.of_node->full_name, p->regs, p->irq);
+ printk(KERN_INFO "%pOF: uctrl regs[0x%p] (irq %d)\n",
+ op->dev.of_node, p->regs, p->irq);
uctrl_get_event_status(p);
uctrl_get_external_status(p);
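All three sbus conversions above rely on the %pOF printk extension, which prints a struct device_node as its full path and removes the need for the since-removed full_name string. A small usage sketch (function and node path are illustrative):

#include <linux/of.h>
#include <linux/printk.h>

static void foo_report(const struct device_node *np)
{
        /* Prints e.g. "foo: probed /ssm@0,0/sbus@40,0/display7seg" */
        pr_info("foo: probed %pOF\n", np);
}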
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d384f4f86c26..f4538d7a3016 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1230,6 +1230,8 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
+ depends on NVME_TARGET_FC || NVME_TARGET_FC=n
+ depends on NVME_FC || NVME_FC=n
select CRC_T10DIF
---help---
This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 707ee2f5954d..4591113c49de 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
return -EBUSY;
if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
return -EFAULT;
- if (qd.cnum == -1)
+ if (qd.cnum == -1) {
+ if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+ return -EINVAL;
qd.cnum = qd.id;
- else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
- {
+ } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
return -EINVAL;
qd.instance = dev->scsi_host_ptr->host_no;
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index 741d81861d17..07b60a780c06 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -55,9 +55,9 @@ aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
- $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
+ $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \
$(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
- $(src)/aic7xxx.seq
+ $(srctree)/$(src)/aic7xxx.seq
$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
else
@@ -72,14 +72,14 @@ aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
- $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
+ $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \
$(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
- $(src)/aic79xx.seq
+ $(srctree)/$(src)/aic79xx.seq
$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
else
$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
endif
-$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
- $(MAKE) -C $(src)/aicasm
+$(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl]
+ $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index b98c5c1056c3..45e2d49c1fff 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -1,19 +1,21 @@
PROG= aicasm
+OUTDIR ?= ./
+
.SUFFIXES= .l .y .c .h
CSRCS= aicasm.c aicasm_symbol.c
YSRCS= aicasm_gram.y aicasm_macro_gram.y
LSRCS= aicasm_scan.l aicasm_macro_scan.l
-GENHDRS= aicdb.h $(YSRCS:.y=.h)
-GENSRCS= $(YSRCS:.y=.c) $(LSRCS:.l=.c)
+GENHDRS= $(addprefix ${OUTDIR}/,aicdb.h $(YSRCS:.y=.h))
+GENSRCS= $(addprefix ${OUTDIR}/,$(YSRCS:.y=.c) $(LSRCS:.l=.c))
SRCS= ${CSRCS} ${GENSRCS}
LIBS= -ldb
clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
# Override default kernel CFLAGS. This is a userland app.
-AICASM_CFLAGS:= -I/usr/include -I.
+AICASM_CFLAGS:= -I/usr/include -I. -I$(OUTDIR)
LEX= flex
YACC= bison
YFLAGS= -d
@@ -32,22 +34,25 @@ YFLAGS+= -t -v
LFLAGS= -d
endif
-$(PROG): ${GENHDRS} $(SRCS)
- $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS)
+$(PROG): $(OUTDIR) ${GENHDRS} $(SRCS)
+ $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(OUTDIR)/$(PROG) $(LIBS)
+
+$(OUTDIR):
+ mkdir -p $(OUTDIR)
-aicdb.h:
+$(OUTDIR)/aicdb.h:
@if [ -e "/usr/include/db4/db_185.h" ]; then \
- echo "#include <db4/db_185.h>" > aicdb.h; \
+ echo "#include <db4/db_185.h>" > $@; \
elif [ -e "/usr/include/db3/db_185.h" ]; then \
- echo "#include <db3/db_185.h>" > aicdb.h; \
+ echo "#include <db3/db_185.h>" > $@; \
elif [ -e "/usr/include/db2/db_185.h" ]; then \
- echo "#include <db2/db_185.h>" > aicdb.h; \
+ echo "#include <db2/db_185.h>" > $@; \
elif [ -e "/usr/include/db1/db_185.h" ]; then \
- echo "#include <db1/db_185.h>" > aicdb.h; \
+ echo "#include <db1/db_185.h>" > $@; \
elif [ -e "/usr/include/db/db_185.h" ]; then \
- echo "#include <db/db_185.h>" > aicdb.h; \
+ echo "#include <db/db_185.h>" > $@; \
elif [ -e "/usr/include/db_185.h" ]; then \
- echo "#include <db_185.h>" > aicdb.h; \
+ echo "#include <db_185.h>" > $@; \
else \
echo "*** Install db development libraries"; \
fi
@@ -58,23 +63,23 @@ clean:
# Create a dependency chain in generated files
# to avoid concurrent invocations of the single
# rule that builds them all.
-aicasm_gram.c: aicasm_gram.h
-aicasm_gram.c aicasm_gram.h: aicasm_gram.y
+$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h
+$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y
$(YACC) $(YFLAGS) -b $(<:.y=) $<
- mv $(<:.y=).tab.c $(<:.y=.c)
- mv $(<:.y=).tab.h $(<:.y=.h)
+ mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
+ mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
# Create a dependency chain in generated files
# to avoid concurrent invocations of the single
# rule that builds them all.
-aicasm_macro_gram.c: aicasm_macro_gram.h
-aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y
+$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h
+$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y
$(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
- mv $(<:.y=).tab.c $(<:.y=.c)
- mv $(<:.y=).tab.h $(<:.y=.h)
+ mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
+ mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
-aicasm_scan.c: aicasm_scan.l
- $(LEX) $(LFLAGS) -o$@ $<
+$(OUTDIR)/aicasm_scan.c: aicasm_scan.l
+ $(LEX) $(LFLAGS) -o $@ $<
-aicasm_macro_scan.c: aicasm_macro_scan.l
- $(LEX) $(LFLAGS) -Pmm -o$@ $<
+$(OUTDIR)/aicasm_macro_scan.c: aicasm_macro_scan.l
+ $(LEX) $(LFLAGS) -Pmm -o $@ $<
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dfe709a7138..6844ba361616 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
};
/**
- * bnx2fc_percpu_thread_create - Create a receive thread for an
- * online CPU
+ * bnx2fc_cpu_online - Create a receive thread for an online CPU
*
* @cpu: cpu index for the online cpu
*/
-static void bnx2fc_percpu_thread_create(unsigned int cpu)
+static int bnx2fc_cpu_online(unsigned int cpu)
{
struct bnx2fc_percpu_s *p;
struct task_struct *thread;
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
(void *)p, cpu_to_node(cpu),
"bnx2fc_thread/%d", cpu);
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
/* bind thread to the cpu */
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- p->iothread = thread;
- wake_up_process(thread);
- }
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ return 0;
}
-static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+static int bnx2fc_cpu_offline(unsigned int cpu)
{
struct bnx2fc_percpu_s *p;
struct task_struct *thread;
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
thread = p->iothread;
p->iothread = NULL;
-
/* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
if (thread)
kthread_stop(thread);
-}
-
-
-static int bnx2fc_cpu_online(unsigned int cpu)
-{
- printk(PFX "CPU %x online: Create Rx thread\n", cpu);
- bnx2fc_percpu_thread_create(cpu);
- return 0;
-}
-
-static int bnx2fc_cpu_dead(unsigned int cpu)
-{
- printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
- bnx2fc_percpu_thread_destroy(cpu);
return 0;
}
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock);
}
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- bnx2fc_percpu_thread_create(cpu);
-
- rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "scsi/bnx2fc:online",
- bnx2fc_cpu_online, NULL);
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
+ bnx2fc_cpu_online, bnx2fc_cpu_offline);
if (rc < 0)
- goto stop_threads;
+ goto stop_thread;
bnx2fc_online_state = rc;
- cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
- NULL, bnx2fc_cpu_dead);
- put_online_cpus();
-
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
-
return 0;
-stop_threads:
- for_each_online_cpu(cpu)
- bnx2fc_percpu_thread_destroy(cpu);
- put_online_cpus();
+stop_thread:
kthread_stop(l2_thread);
free_wq:
destroy_workqueue(bnx2fc_wq);
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
struct fcoe_percpu_s *bg;
struct task_struct *l2_thread;
struct sk_buff *skb;
- unsigned int cpu = 0;
/*
* NOTE: Since cnic calls register_driver routine rtnl_lock,
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread)
kthread_stop(l2_thread);
- get_online_cpus();
- /* Destroy per cpu threads */
- for_each_online_cpu(cpu) {
- bnx2fc_percpu_thread_destroy(cpu);
- }
-
- cpuhp_remove_state_nocalls(bnx2fc_online_state);
- cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
-
- put_online_cpus();
+ cpuhp_remove_state(bnx2fc_online_state);
destroy_workqueue(bnx2fc_wq);
/*
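The bnx2fc conversion above (and the matching bnx2i one below) replaces open-coded for_each_online_cpu() loops plus cpuhp_setup_state_nocalls() with a single cpuhp_setup_state() call: the online callback runs for every CPU that is already up, and the offline callback runs symmetrically at hot-unplug and again for each CPU at cpuhp_remove_state() time, which makes the manual loops and the extra CPUHP_*_DEAD state unnecessary. The skeleton of the pattern, with invented foo_* names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state foo_online_state;

static int foo_cpu_online(unsigned int cpu)
{
        /* Start the per-CPU resource; a negative return aborts setup. */
        return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
        /* Stop the per-CPU resource and drain any queued work. */
        return 0;
}

static int __init foo_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/foo:online",
                                foo_cpu_online, foo_cpu_offline);
        if (ret < 0)
                return ret;
        foo_online_state = ret; /* dynamic states return the state number */
        return 0;
}

static void __exit foo_exit(void)
{
        /* Invokes foo_cpu_offline() on each online CPU, then unregisters. */
        cpuhp_remove_state(foo_online_state);
}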
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 913c750205ce..26de61d65a4d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
return work;
}
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+ unsigned int cpu = wqe % num_possible_cpus();
+ struct bnx2fc_percpu_s *fps;
+ struct bnx2fc_work *work;
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (fps->iothread) {
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work) {
+ list_add_tail(&work->list, &fps->work_list);
+ wake_up_process(fps->iothread);
+ spin_unlock_bh(&fps->fp_work_lock);
+ return;
+ }
+ }
+ spin_unlock_bh(&fps->fp_work_lock);
+ bnx2fc_process_cq_compl(tgt, wqe);
+}
+
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
- /* Pending work request completion */
- struct bnx2fc_work *work = NULL;
- struct bnx2fc_percpu_s *fps = NULL;
- unsigned int cpu = wqe % num_possible_cpus();
-
- fps = &per_cpu(bnx2fc_percpu, cpu);
- spin_lock_bh(&fps->fp_work_lock);
- if (unlikely(!fps->iothread))
- goto unlock;
-
- work = bnx2fc_alloc_work(tgt, wqe);
- if (work)
- list_add_tail(&work->list,
- &fps->work_list);
-unlock:
- spin_unlock_bh(&fps->fp_work_lock);
-
- /* Pending work request completion */
- if (fps->iothread && work)
- wake_up_process(fps->iothread);
- else
- bnx2fc_process_cq_compl(tgt, wqe);
+ bnx2fc_pending_work(tgt, wqe);
num_free_sqes++;
}
cqe++;
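bnx2fc_pending_work() above factors out a common handoff idiom: under the per-CPU lock, try to queue the completion to that CPU's I/O thread, and if the thread is gone (the CPU went offline) or the work allocation fails, drop the lock and process the completion inline so nothing is lost. Restated generically with hypothetical foo_* types:

static void foo_dispatch(struct foo_tgt *tgt, unsigned int wqe)
{
        unsigned int cpu = wqe % num_possible_cpus();
        struct foo_percpu *fps = &per_cpu(foo_percpu, cpu);
        struct foo_work *work;

        spin_lock_bh(&fps->lock);
        if (fps->iothread) {
                work = foo_alloc_work(tgt, wqe);
                if (work) {
                        list_add_tail(&work->list, &fps->work_list);
                        wake_up_process(fps->iothread);
                        spin_unlock_bh(&fps->lock);
                        return; /* handed off to the kthread */
                }
        }
        spin_unlock_bh(&fps->lock);
        foo_process_inline(tgt, wqe);   /* fallback keeps completions flowing */
}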
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 86afc002814c..4ebcda8d9500 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
/**
- * bnx2i_percpu_thread_create - Create a receive thread for an
- * online CPU
+ * bnx2i_cpu_online - Create a receive thread for an online CPU
*
* @cpu: cpu index for the online cpu
*/
-static void bnx2i_percpu_thread_create(unsigned int cpu)
+static int bnx2i_cpu_online(unsigned int cpu)
{
struct bnx2i_percpu_s *p;
struct task_struct *thread;
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
cpu_to_node(cpu),
"bnx2i_thread/%d", cpu);
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
/* bind thread to the cpu */
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- p->iothread = thread;
- wake_up_process(thread);
- }
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ return 0;
}
-
-static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+static int bnx2i_cpu_offline(unsigned int cpu)
{
struct bnx2i_percpu_s *p;
struct task_struct *thread;
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
spin_unlock_bh(&p->p_work_lock);
if (thread)
kthread_stop(thread);
-}
-
-static int bnx2i_cpu_online(unsigned int cpu)
-{
- pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
- bnx2i_percpu_thread_create(cpu);
- return 0;
-}
-
-static int bnx2i_cpu_dead(unsigned int cpu)
-{
- pr_info("CPU %x offline: Remove Rx thread\n", cpu);
- bnx2i_percpu_thread_destroy(cpu);
return 0;
}
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL;
}
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- bnx2i_percpu_thread_create(cpu);
-
- err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "scsi/bnx2i:online",
- bnx2i_cpu_online, NULL);
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
+ bnx2i_cpu_online, bnx2i_cpu_offline);
if (err < 0)
- goto remove_threads;
+ goto unreg_driver;
bnx2i_online_state = err;
-
- cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
- NULL, bnx2i_cpu_dead);
- put_online_cpus();
return 0;
-remove_threads:
- for_each_online_cpu(cpu)
- bnx2i_percpu_thread_destroy(cpu);
- put_online_cpus();
+unreg_driver:
cnic_unregister_driver(CNIC_ULP_ISCSI);
unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport);
@@ -551,7 +525,6 @@ out:
static void __exit bnx2i_mod_exit(void)
{
struct bnx2i_hba *hba;
- unsigned cpu = 0;
mutex_lock(&bnx2i_dev_lock);
while (!list_empty(&adapter_list)) {
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- bnx2i_percpu_thread_destroy(cpu);
-
- cpuhp_remove_state_nocalls(bnx2i_online_state);
- cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
- put_online_cpus();
+ cpuhp_remove_state(bnx2i_online_state);
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index e4c83b7c96a8..1a4cfa562a60 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2128,6 +2128,13 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
+ pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
+ task, task->sc, tcp_task,
+ tcp_task ? tcp_task->dd_data : NULL, tdata);
+ return;
+ }
+
log_debug(1 << CXGBI_DBG_ISCSI,
"task 0x%p, skb 0x%p, itt 0x%x.\n",
task, tdata->skb, task->hdr_itt);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8914eab84337..4f7cdb28bd38 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = {
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
- .max_sectors = 8192,
+ .max_sectors = 1024,
.no_write_same = 1,
};
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ed48ed38e79..7ee1a94c0b33 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rcv %08x Release %08x Drop %08x\n",
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5cc8b0f7d885..744f3f395b64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
- "FCP: Rcv %08x Drop %08x\n",
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
+ atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fbeec344c6cc..bbbd0f84160d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *rsp)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+ struct lpfc_hba *phba = ctxp->phba;
+
+ lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+ ctxp->oxid, ctxp->size, smp_processor_id());
+
+ tgtp = phba->targetport->private;
+ atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
static struct nvmet_fc_target_template lpfc_tgttemplate = {
.targetport_delete = lpfc_nvmet_targetport_delete,
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
.fcp_op = lpfc_nvmet_xmt_fcp_op,
.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+ .defer_rcv = lpfc_nvmet_defer_rcv,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+ /* defer reposting rcv buffer till .defer_rcv callback */
+ ctxp->rqb_buffer = nvmebuf;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
+
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index e675ef17be08..48a76788b003 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
atomic_t rcv_fcp_cmd_in;
atomic_t rcv_fcp_cmd_out;
atomic_t rcv_fcp_cmd_drop;
+ atomic_t rcv_fcp_cmd_defer;
atomic_t xmt_fcp_release;
/* Stats counters - lpfc_nvmet_xmt_fcp_op */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f990ab4d45e1..985510628f56 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -425,7 +425,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
- u32 max_mpt_cmd, i;
+ u32 max_mpt_cmd, i, j;
struct fusion_context *fusion;
fusion = instance->ctrl_context;
@@ -450,11 +450,15 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
GFP_KERNEL);
if (!fusion->cmd_list[i]) {
+ for (j = 0; j < i; j++)
+ kfree(fusion->cmd_list[j]);
+ kfree(fusion->cmd_list);
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
}
+
return 0;
}
int
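The megaraid change above is the stock unwind-on-failure idiom for arrays of allocations: on the first element that fails, free every element allocated so far and then the array itself, so the caller sees either full success or no leak at all. As a standalone sketch with a dummy payload type:

#include <linux/slab.h>

struct foo {
        int payload;
};

static struct foo **foo_alloc_table(unsigned int n)
{
        struct foo **tbl;
        unsigned int i, j;

        tbl = kcalloc(n, sizeof(*tbl), GFP_KERNEL);
        if (!tbl)
                return NULL;

        for (i = 0; i < n; i++) {
                tbl[i] = kzalloc(sizeof(**tbl), GFP_KERNEL);
                if (!tbl[i]) {
                        for (j = 0; j < i; j++) /* unwind partial progress */
                                kfree(tbl[j]);
                        kfree(tbl);
                        return NULL;
                }
        }
        return tbl;
}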
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 4d038926a455..351f06dfc5a0 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -528,7 +528,8 @@ struct fip_vlan {
#define QEDF_WRITE (1 << 0)
#define MAX_FIBRE_LUNS 0xffffffff
-#define QEDF_MAX_NUM_CQS 8
+#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
+ num_online_cpus())
/*
* PCI function probe defines
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7786c97e033f..1d13c9ca517d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
* we allocate is the minimum of:
*
* Number of CPUs
- * Number of MSI-X vectors
- * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+ * Number allocated by qed for our PCI function
*/
- qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
- num_online_cpus());
+ qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
qedf->num_queues);
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err1;
}
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
+ goto err1;
+ }
+
/* queue allocation code should come here
* order should be
* slowpath_start
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
- /* Learn information crucial for qedf to progress */
- rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
- if (rc) {
- QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
- goto err1;
- }
-
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 21331453db7b..2ff753ce6e27 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -5,6 +5,7 @@ config QEDI
select SCSI_ISCSI_ATTRS
select QED_LL2
select QED_ISCSI
+ select ISCSI_BOOT_SYSFS
---help---
This driver supports iSCSI offload for the QLogic FastLinQ
41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 80edd28b635f..37da9a8b43b1 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -824,7 +824,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
u32 iscsi_cid = QEDI_CID_RESERVED;
u16 len = 0;
char *buf = NULL;
- int ret;
+ int ret, tmp;
if (!shost) {
ret = -ENXIO;
@@ -940,10 +940,10 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
ep_rel_conn:
qedi->ep_tbl[iscsi_cid] = NULL;
- ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
- if (ret)
+ tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (tmp)
QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
- ret);
+ tmp);
ep_free_sq:
qedi_free_sq(qedi, qedi_ep);
ep_conn_exit:
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index b20da0d27ad7..3f82ea1b72dc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- cmd->data_work = 1;
- if (cmd->aborted) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
- tcm_qla2xxx_free_cmd(cmd);
- return;
- }
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- unsigned long flags;
if (qlt_abort_cmd(cmd))
return;
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if ((cmd->state == QLA_TGT_STATE_NEW)||
- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
- DATA_WORK_NOT_FREE(cmd))) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /*
- * cmd has not reached fw, Use this trigger to free it.
- */
- tcm_qla2xxx_free_cmd(cmd);
- return;
- }
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- return;
-
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 7e24aa30c3b0..892fbd9800d9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1286,7 +1286,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return -EBUSY;
}
@@ -2430,8 +2430,10 @@ fc_remove_host(struct Scsi_Host *shost)
spin_lock_irqsave(shost->host_lock, flags);
/* Remove any vports */
- list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ vport->flags |= FC_VPORT_DELETING;
fc_queue_work(shost, &vport->vport_delete_work);
+ }
/* Remove any remote ports */
list_for_each_entry_safe(rport, next_rport,
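The scsi_transport_fc hunks close a delete/delete race: fc_remove_host() now marks each vport FC_VPORT_DELETING under host_lock before queueing the asynchronous delete, and store_fc_vport_delete() tests that flag too, so exactly one path ever owns the teardown. The claim-under-lock shape, sketched with hypothetical FOO_* flags and types:

static int foo_vport_delete(struct Scsi_Host *shost, struct foo_vport *vport)
{
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        if (vport->flags & (FOO_VPORT_DEL | FOO_VPORT_CREATING |
                            FOO_VPORT_DELETING)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return -EBUSY;  /* teardown already claimed elsewhere */
        }
        vport->flags |= FOO_VPORT_DELETING;     /* claim before unlocking */
        spin_unlock_irqrestore(shost->host_lock, flags);

        queue_work(foo_wq, &vport->delete_work);
        return 0;
}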
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 1e82d4128a84..d7ff71e0c85c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -751,32 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
return count;
}
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
- switch (hp->dxfer_direction) {
- case SG_DXFER_NONE:
- if (hp->dxferp || hp->dxfer_len > 0)
- return false;
- return true;
- case SG_DXFER_FROM_DEV:
- if (hp->dxfer_len < 0)
- return false;
- return true;
- case SG_DXFER_TO_DEV:
- case SG_DXFER_TO_FROM_DEV:
- if (!hp->dxferp || hp->dxfer_len == 0)
- return false;
- return true;
- case SG_DXFER_UNKNOWN:
- if ((!hp->dxferp && hp->dxfer_len) ||
- (hp->dxferp && hp->dxfer_len == 0))
- return false;
- return true;
- default:
- return false;
- }
-}
-
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
@@ -797,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (!sg_is_valid_dxfer(hp))
+ if (hp->dxfer_len >= SZ_256M)
return -EINVAL;
k = sg_start_req(srp, cmnd);
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07ec8a8877de..e164ffade38a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat {
#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
#define RAID_MAP_MAX_ENTRIES 1024
diff --git a/drivers/soc/zte/Kconfig b/drivers/soc/zte/Kconfig
index 20bde38ce2f9..e9d750c510cd 100644
--- a/drivers/soc/zte/Kconfig
+++ b/drivers/soc/zte/Kconfig
@@ -2,6 +2,7 @@
# ZTE SoC drivers
#
menuconfig SOC_ZTE
+ depends on ARCH_ZX || COMPILE_TEST
bool "ZTE SoC driver support"
if SOC_ZTE
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ca11be21f64b..34ca7823255d 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
continue;
}
+ set_current_state(TASK_RUNNING);
wp = async->buf_write_ptr;
n1 = min(n, async->prealloc_bufsz - wp);
n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
}
continue;
}
+
+ set_current_state(TASK_RUNNING);
rp = async->buf_read_ptr;
n1 = min(n, async->prealloc_bufsz - rp);
n2 = n - n1;
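Both comedi hunks fix the same wait-loop bug: after set_current_state(TASK_INTERRUPTIBLE), the task has to be put back to TASK_RUNNING before it does anything that may sleep (such as copying to or from user buffers), otherwise the "do not call blocking ops when !TASK_RUNNING" debug warning fires. The canonical loop, as a sketch with a hypothetical foo_async:

#include <linux/sched.h>
#include <linux/uaccess.h>

struct foo_async {
        void *data;
        bool ready;
};

static ssize_t foo_read(char __user *buf, struct foo_async *async, size_t n)
{
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (READ_ONCE(async->ready))
                        break;
                if (signal_pending(current)) {
                        __set_current_state(TASK_RUNNING);
                        return -ERESTARTSYS;
                }
                schedule();     /* sleep until the producer wakes us */
        }
        set_current_state(TASK_RUNNING);        /* required before copying */

        if (copy_to_user(buf, async->data, n))
                return -EFAULT;
        return n;
}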
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index a6a8393d6664..3e00df74b18c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad2s1210_state *st = iio_priv(indio_dev);
- bool negative;
+ u16 negative;
int ret = 0;
u16 pos;
s16 vel;
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
index 9341232c580d..4d0b181a9671 100644
--- a/drivers/staging/media/atomisp/i2c/ap1302.h
+++ b/drivers/staging/media/atomisp/i2c/ap1302.h
@@ -158,8 +158,8 @@ struct ap1302_res_struct {
};
struct ap1302_context_res {
- s32 res_num;
- s32 cur_res;
+ u32 res_num;
+ u32 cur_res;
struct ap1302_res_struct *res_table;
};
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index f31eb277f542..7d8a0aeecb6c 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -454,6 +454,6 @@ struct gc0310_resolution gc0310_res_video[] = {
#define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video))
static struct gc0310_resolution *gc0310_res = gc0310_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
#endif
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index ccbc757045a5..7c3d994180cc 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -668,5 +668,5 @@ struct gc2235_resolution gc2235_res_video[] = {
#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video))
static struct gc2235_resolution *gc2235_res = gc2235_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
index 36b3f3a5a41f..41b4133ca995 100644
--- a/drivers/staging/media/atomisp/i2c/imx/imx.h
+++ b/drivers/staging/media/atomisp/i2c/imx/imx.h
@@ -480,7 +480,7 @@ struct imx_device {
struct imx_vcm *vcm_driver;
struct imx_otp *otp_driver;
const struct imx_resolution *curr_res_table;
- int entries_curr_table;
+ unsigned long entries_curr_table;
const struct firmware *fw;
struct imx_reg_addr *reg_addr;
const struct imx_reg *param_hold;
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index 944fe8e3bcbf..ab8907e6c9ef 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -934,7 +934,6 @@ static struct ov2680_resolution ov2680_res_video[] = {
#define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video))
static struct ov2680_resolution *ov2680_res = ov2680_res_preview;
-static int N_RES = N_RES_PREVIEW;
-
+static unsigned long N_RES = N_RES_PREVIEW;
#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index b0d40965d89e..73ecb1679718 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -1263,5 +1263,5 @@ struct ov2722_resolution ov2722_res_video[] = {
#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video))
static struct ov2722_resolution *ov2722_res = ov2722_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index d88ac1777d86..8c2e6794463b 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -1377,5 +1377,5 @@ struct ov5693_resolution ov5693_res_video[] = {
#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
index 9be6a0e63861..d3fde200c013 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -266,7 +266,7 @@ struct ov8858_device {
const struct ov8858_reg *regs;
struct ov8858_vcm *vcm_driver;
const struct ov8858_resolution *curr_res_table;
- int entries_curr_table;
+ unsigned long entries_curr_table;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *run_mode;
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
index 09e3cdc1a394..f9a3cf8fbf1a 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -266,7 +266,7 @@ struct ov8858_device {
const struct ov8858_reg *regs;
struct ov8858_vcm *vcm_driver;
const struct ov8858_resolution *curr_res_table;
- int entries_curr_table;
+ unsigned long entries_curr_table;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *run_mode;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 726eaa293c55..2bd98f0667ec 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -354,7 +354,9 @@ ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
-ccflags-y += -Wno-unused-const-variable -Wno-missing-prototypes \
- -Wno-unused-but-set-variable -Wno-missing-declarations \
- -Wno-suggest-attribute=format -Wno-missing-prototypes \
- -Wno-implicit-fallthrough
+ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+ccflags-y += $(call cc-disable-warning, missing-prototypes)
+ccflags-y += $(call cc-disable-warning, missing-declarations)
+ccflags-y += $(call cc-disable-warning, suggest-attribute=format)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
+ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
index d3667132851b..c8e0c4fe3717 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -275,7 +275,7 @@ struct atomisp_device {
*/
struct mutex streamoff_mutex;
- int input_cnt;
+ unsigned int input_cnt;
struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
struct v4l2_subdev *flash;
struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 370ecb959543..f28916ea69f1 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -1,7 +1,7 @@
/*
* cxd2099.c: Driver for the CXD2099AR Common Interface Controller
*
- * Copyright (C) 2010-2011 Digital Devices GmbH
+ * Copyright (C) 2010-2013 Digital Devices GmbH
*
*
* This program is free software; you can redistribute it and/or
@@ -33,7 +33,10 @@
#include "cxd2099.h"
-#define MAX_BUFFER_SIZE 248
+/* comment out this line to deactivate the cxd2099ar buffer mode */
+#define BUFFER_MODE 1
+
+static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount);
struct cxd {
struct dvb_ca_en50221 en;
@@ -48,6 +51,7 @@ struct cxd {
int mode;
int ready;
int dr;
+ int write_busy;
int slot_stat;
u8 amem[1024];
@@ -55,6 +59,9 @@ struct cxd {
int cammode;
struct mutex lock;
+
+ u8 rbuf[1028];
+ u8 wbuf[1028];
};
static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
@@ -73,7 +80,7 @@ static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
}
static int i2c_write(struct i2c_adapter *adapter, u8 adr,
- u8 *data, u8 len)
+ u8 *data, u16 len)
{
struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
@@ -100,12 +107,12 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
}
static int i2c_read(struct i2c_adapter *adapter, u8 adr,
- u8 reg, u8 *data, u8 n)
+ u8 reg, u8 *data, u16 n)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1},
- {.addr = adr, .flags = I2C_M_RD,
- .buf = data, .len = n} };
+ .buf = &reg, .len = 1},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = n} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
dev_err(&adapter->dev, "error in i2c_read\n");
@@ -114,14 +121,26 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
return 0;
}
-static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
+static int read_block(struct cxd *ci, u8 adr, u8 *data, u16 n)
{
- int status;
+ int status = 0;
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
+ if (ci->lastaddress != adr)
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
if (!status) {
ci->lastaddress = adr;
- status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n);
+
+ while (n) {
+ int len = n;
+
+ if (ci->cfg.max_i2c && (len > ci->cfg.max_i2c))
+ len = ci->cfg.max_i2c;
+ status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, len);
+ if (status)
+ return status;
+ data += len;
+ n -= len;
+ }
}
return status;
}
@@ -182,16 +201,16 @@ static int write_io(struct cxd *ci, u16 address, u8 val)
static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
{
- int status;
+ int status = 0;
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
+ if (ci->lastaddress != reg)
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
+ ci->lastaddress = reg;
ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
- if (!status) {
- ci->lastaddress = reg;
+ if (!status)
status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
- }
if (reg == 0x20)
ci->regs[reg] &= 0x7f;
return status;
@@ -203,16 +222,29 @@ static int write_reg(struct cxd *ci, u8 reg, u8 val)
}
#ifdef BUFFER_MODE
-static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
+static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n)
{
- int status;
- u8 buf[256] = {1};
-
- status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
- if (!status) {
- ci->lastaddress = adr;
- memcpy(buf + 1, data, n);
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
+ int status = 0;
+ u8 *buf = ci->wbuf;
+
+ if (ci->lastaddress != adr)
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
+ if (status)
+ return status;
+
+ ci->lastaddress = adr;
+ buf[0] = 1;
+ while (n) {
+ int len = n;
+
+ if (ci->cfg.max_i2c && (len + 1 > ci->cfg.max_i2c))
+ len = ci->cfg.max_i2c - 1;
+ memcpy(buf + 1, data, len);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, len + 1);
+ if (status)
+ return status;
+ n -= len;
+ data += len;
}
return status;
}
@@ -238,6 +270,8 @@ static void set_mode(struct cxd *ci, int mode)
static void cam_mode(struct cxd *ci, int mode)
{
+ u8 dummy;
+
if (mode == ci->cammode)
return;
@@ -246,16 +280,15 @@ static void cam_mode(struct cxd *ci, int mode)
write_regm(ci, 0x20, 0x80, 0x80);
break;
case 0x01:
-#ifdef BUFFER_MODE
if (!ci->en.read_data)
return;
+ ci->write_busy = 0;
dev_info(&ci->i2c->dev, "enable cam buffer mode\n");
- /* write_reg(ci, 0x0d, 0x00); */
- /* write_reg(ci, 0x0e, 0x01); */
+ write_reg(ci, 0x0d, 0x00);
+ write_reg(ci, 0x0e, 0x01);
write_regm(ci, 0x08, 0x40, 0x40);
- /* read_reg(ci, 0x12, &dummy); */
+ read_reg(ci, 0x12, &dummy);
write_regm(ci, 0x08, 0x80, 0x80);
-#endif
break;
default:
break;
@@ -325,7 +358,10 @@ static int init(struct cxd *ci)
if (status < 0)
break;
- if (ci->cfg.clock_mode) {
+ if (ci->cfg.clock_mode == 2) {
+ /* bitrate * 2^13 / 72000 */
+ u32 reg = ((ci->cfg.bitrate << 13) + 71999) / 72000;
+
if (ci->cfg.polarity) {
status = write_reg(ci, 0x09, 0x6f);
if (status < 0)
@@ -335,6 +371,25 @@ static int init(struct cxd *ci)
if (status < 0)
break;
}
+ status = write_reg(ci, 0x20, 0x08);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x21, (reg >> 8) & 0xff);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x22, reg & 0xff);
+ if (status < 0)
+ break;
+ } else if (ci->cfg.clock_mode == 1) {
+ if (ci->cfg.polarity) {
+ status = write_reg(ci, 0x09, 0x6f); /* D */
+ if (status < 0)
+ break;
+ } else {
+ status = write_reg(ci, 0x09, 0x6d);
+ if (status < 0)
+ break;
+ }
status = write_reg(ci, 0x20, 0x68);
if (status < 0)
break;
@@ -346,7 +401,7 @@ static int init(struct cxd *ci)
break;
} else {
if (ci->cfg.polarity) {
- status = write_reg(ci, 0x09, 0x4f);
+ status = write_reg(ci, 0x09, 0x4f); /* C */
if (status < 0)
break;
} else {
@@ -354,7 +409,6 @@ static int init(struct cxd *ci)
if (status < 0)
break;
}
-
status = write_reg(ci, 0x20, 0x28);
if (status < 0)
break;
@@ -401,7 +455,6 @@ static int read_attribute_mem(struct dvb_ca_en50221 *ca,
set_mode(ci, 1);
read_pccard(ci, address, &val, 1);
mutex_unlock(&ci->lock);
- /* printk(KERN_INFO "%02x:%02x\n", address,val); */
return val;
}
@@ -446,6 +499,9 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
struct cxd *ci = ca->data;
+ if (ci->cammode)
+ read_data(ca, slot, ci->rbuf, 0);
+
mutex_lock(&ci->lock);
cam_mode(ci, 0);
write_reg(ci, 0x00, 0x21);
@@ -465,7 +521,6 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
}
}
mutex_unlock(&ci->lock);
- /* msleep(500); */
return 0;
}
@@ -474,11 +529,19 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
struct cxd *ci = ca->data;
dev_info(&ci->i2c->dev, "%s\n", __func__);
+ if (ci->cammode)
+ read_data(ca, slot, ci->rbuf, 0);
mutex_lock(&ci->lock);
+ write_reg(ci, 0x00, 0x21);
+ write_reg(ci, 0x06, 0x1F);
+ msleep(300);
+
write_regm(ci, 0x09, 0x08, 0x08);
write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
+
ci->mode = -1;
+ ci->write_busy = 0;
mutex_unlock(&ci->lock);
return 0;
}
@@ -490,9 +553,7 @@ static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
mutex_lock(&ci->lock);
write_regm(ci, 0x09, 0x00, 0x08);
set_mode(ci, 0);
-#ifdef BUFFER_MODE
cam_mode(ci, 1);
-#endif
mutex_unlock(&ci->lock);
return 0;
}
@@ -506,12 +567,10 @@ static int campoll(struct cxd *ci)
return 0;
write_reg(ci, 0x05, istat);
- if (istat & 0x40) {
+ if (istat & 0x40)
ci->dr = 1;
- dev_info(&ci->i2c->dev, "DR\n");
- }
if (istat & 0x20)
- dev_info(&ci->i2c->dev, "WC\n");
+ ci->write_busy = 0;
if (istat & 2) {
u8 slotstat;
@@ -519,7 +578,8 @@ static int campoll(struct cxd *ci)
read_reg(ci, 0x01, &slotstat);
if (!(2 & slotstat)) {
if (!ci->slot_stat) {
- ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT;
+ ci->slot_stat |=
+ DVB_CA_EN50221_POLL_CAM_PRESENT;
write_regm(ci, 0x03, 0x08, 0x08);
}
@@ -531,8 +591,8 @@ static int campoll(struct cxd *ci)
ci->ready = 0;
}
}
- if (istat & 8 &&
- ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
+ if ((istat & 8) &&
+ (ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT)) {
ci->ready = 1;
ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
}
@@ -553,7 +613,6 @@ static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
return ci->slot_stat;
}
-#ifdef BUFFER_MODE
static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
{
struct cxd *ci = ca->data;
@@ -564,30 +623,38 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
campoll(ci);
mutex_unlock(&ci->lock);
- dev_info(&ci->i2c->dev, "%s\n", __func__);
if (!ci->dr)
return 0;
mutex_lock(&ci->lock);
read_reg(ci, 0x0f, &msb);
read_reg(ci, 0x10, &lsb);
- len = (msb << 8) | lsb;
+ len = ((u16)msb << 8) | lsb;
+ if (len > ecount || len < 2) {
+ /* read it anyway or cxd may hang */
+ read_block(ci, 0x12, ci->rbuf, len);
+ mutex_unlock(&ci->lock);
+ return -EIO;
+ }
read_block(ci, 0x12, ebuf, len);
ci->dr = 0;
mutex_unlock(&ci->lock);
-
return len;
}
+#ifdef BUFFER_MODE
+
static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
{
struct cxd *ci = ca->data;
+ if (ci->write_busy)
+ return -EAGAIN;
mutex_lock(&ci->lock);
- dev_info(&ci->i2c->dev, "%s %d\n", __func__, ecount);
write_reg(ci, 0x0d, ecount >> 8);
write_reg(ci, 0x0e, ecount & 0xff);
write_block(ci, 0x11, ebuf, ecount);
+ ci->write_busy = 1;
mutex_unlock(&ci->lock);
return ecount;
}
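The cxd2099 rework bounds every register burst by the new cfg.max_i2c limit, splitting long reads and writes into adapter-sized chunks. The chunking loop, reduced to its essentials (register addressing and the lastaddress cache elided; a sketch, not the driver's exact code):

#include <linux/i2c.h>

static int foo_chunked_read(struct i2c_adapter *adap, u8 addr, u8 reg,
                            u8 *data, u16 n, u16 max_i2c)
{
        while (n) {
                u16 len = n;
                struct i2c_msg msgs[2] = {
                        { .addr = addr, .flags = 0, .buf = &reg, .len = 1 },
                        { .addr = addr, .flags = I2C_M_RD, .buf = data },
                };

                if (max_i2c && len > max_i2c)
                        len = max_i2c;  /* respect the adapter's limit */
                msgs[1].len = len;

                if (i2c_transfer(adap, msgs, 2) != 2)
                        return -EIO;
                data += len;
                n -= len;
        }
        return 0;
}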
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
index 0eb607c5b423..f4b29b1d6eb8 100644
--- a/drivers/staging/media/cxd2099/cxd2099.h
+++ b/drivers/staging/media/cxd2099/cxd2099.h
@@ -30,8 +30,10 @@
struct cxd2099_cfg {
u32 bitrate;
u8 adr;
- u8 polarity:1;
- u8 clock_mode:1;
+ u8 polarity;
+ u8 clock_mode;
+
+ u32 max_i2c;
};
#if defined(CONFIG_DVB_CXD2099) || \
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
index bf6635826159..c1572843003a 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -343,7 +343,7 @@ void vbox_fbdev_fini(struct drm_device *dev)
vbox_bo_unpin(bo);
vbox_bo_unreserve(bo);
}
- drm_gem_object_unreference_unlocked(afb->obj);
+ drm_gem_object_put_unlocked(afb->obj);
afb->obj = NULL;
}
drm_fb_helper_fini(&fbdev->helper);
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
index d0c6ec75a3c7..80bd039fa08e 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -40,7 +40,7 @@ static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
if (vbox_fb->obj)
- drm_gem_object_unreference_unlocked(vbox_fb->obj);
+ drm_gem_object_put_unlocked(vbox_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -198,7 +198,7 @@ static struct drm_framebuffer *vbox_user_framebuffer_create(
err_free_vbox_fb:
kfree(vbox_fb);
err_unref_obj:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -472,7 +472,7 @@ int vbox_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -525,7 +525,7 @@ vbox_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_vbox_bo(obj);
*offset = vbox_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
ret = 0;
out_unlock:
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 996da1c79158..e5b6383984e7 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -812,7 +812,7 @@ out_unmap_bo:
out_unreserve_bo:
vbox_bo_unreserve(bo);
out_unref_obj:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index e583dd8a418b..d4fa41be80f9 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
- return;
+ goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
}
static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
- return;
+ goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
}
static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
struct tid_info *t = lldi->tids;
csk = lookup_tid(t, tid);
- if (unlikely(!csk))
+ if (unlikely(!csk)) {
pr_err("can't find connection for tid %u.\n", tid);
- else
+ goto rel_skb;
+ } else {
cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+ }
cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
}
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index dda13f1af38e..514986b57c2d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
- unsigned int nents)
+ unsigned int nents, u32 skip)
{
struct skb_seq_state st;
const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
}
consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
- buf_len, consumed);
+ buf_len, skip + consumed);
}
}
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
- cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+ cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
}
cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cmd->se_cmd.data_length);
if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+ u32 skip = data_offset % PAGE_SIZE;
+
sg_off = data_offset / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+ sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
- cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
}
check_payload:
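The cxgbit changes thread a byte skip into sg_pcopy_from_buffer() so a data-out PDU whose data_offset is not page aligned lands at the right place inside the first scatterlist page, with sg_nents sized to cover skip + data_len. The lib/scatterlist.c helper takes the skip as its final argument; a schematic wrapper (names invented):

#include <linux/scatterlist.h>

/* Copy len bytes of buf into the sg list, starting skip bytes in. */
static size_t foo_copy_with_skip(struct scatterlist *sg, unsigned int nents,
                                 const void *buf, size_t len, off_t skip)
{
        return sg_pcopy_from_buffer(sg, nents, buf, len, skip);
}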
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e4975dd1b1..5001261f5d69 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
+ atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
+ kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
return text_length;
if (completed) {
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
} else {
- hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+ hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
cmd->read_data_done += text_length;
if (cmd->targ_xfer_tag == 0xFFFFFFFF)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e9bdc8b86e7d..dc13afbd4c88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
+ return 1;
} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
spin_unlock_bh(&np->np_thread_lock);
goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
iscsit_put_transport(conn->conn_transport);
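The login-thread fix replaces the single ISCSI_NP_THREAD_RESET state test with a counted latch: iscsit_reset_np_thread() (earlier in these hunks) increments np_reset_count, and the thread consumes exactly one pending reset per pass via atomic_dec_if_positive(), so resets arriving back to back are no longer merged or lost. The primitive in isolation, as a toy:

#include <linux/atomic.h>

static atomic_t reset_count = ATOMIC_INIT(0);

/* Requester side: record that (another) reset was asked for. */
static void foo_request_reset(void)
{
        atomic_inc(&reset_count);
}

/* Worker side: returns true exactly once per recorded request. */
static bool foo_consume_reset(void)
{
        /*
         * atomic_dec_if_positive() returns the decremented value but only
         * stores it when the result is still >= 0, so the counter never
         * goes negative.
         */
        return atomic_dec_if_positive(&reset_count) >= 0;
}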
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 36913734c6bc..02e8a5d86658 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
mutex_lock(&tpg->acl_node_mutex);
if (acl->dynamic_node_acl)
acl->dynamic_node_acl = 0;
- list_del(&acl->acl_list);
+ list_del_init(&acl->acl_list);
mutex_unlock(&tpg->acl_node_mutex);
target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
* in transport_deregister_session().
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 97fed9a298bd..836d552b0385 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
}
mutex_lock(&se_tpg->acl_node_mutex);
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
mutex_unlock(&se_tpg->acl_node_mutex);
core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
- list_del(&se_nacl->acl_list);
+ list_del_init(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
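The target-core hunks swap list_del() for list_del_init() so an acl entry can be unlinked by more than one teardown path without corruption: after list_del_init() the node points at itself, making a second removal or a list_empty() test safe, whereas a second plain list_del() would dereference poison pointers. A toy demonstration:

#include <linux/list.h>

struct foo {
        struct list_head node;
};

static void foo_detach(struct foo *f)
{
        /* Re-initializes f->node, so calling foo_detach() twice is harmless. */
        if (!list_empty(&f->node))
                list_del_init(&f->node);
}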
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80ee130f8253..942d094269fb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
block_remaining);
to_offset = get_block_offset_user(udev, dbi,
block_remaining);
- offset = DATA_BLOCK_SIZE - block_remaining;
- to += offset;
if (*iov_cnt != 0 &&
to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
(*iov)->iov_len = copy_bytes;
}
if (copy_data) {
- memcpy(to, from + sg->length - sg_remaining,
- copy_bytes);
+ offset = DATA_BLOCK_SIZE - block_remaining;
+ memcpy(to + offset,
+ from + sg->length - sg_remaining,
+ copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}
sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- from += offset;
tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to + sg->length - sg_remaining, from,
+ memcpy(to + sg->length - sg_remaining, from + offset,
copy_bytes);
sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
if (udev->dev_config[0])
snprintf(str + used, size - used, "/%s", udev->dev_config);
+ /* If the old string exists, free it */
+ kfree(info->name);
info->name = str;
return 0;
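The two scatter/gather hunks above cure a cumulative-offset bug: the old code advanced the to/from base pointers by offset on every loop pass, so the second and later copies landed past the intended spot in the data block. The corrected shape recomputes the offset from block_remaining each iteration and applies it only at the memcpy site; schematically (loop variables as in the driver):

while (sg_remaining > 0) {
        size_t copy_bytes = min_t(size_t, sg_remaining, block_remaining);
        size_t offset = DATA_BLOCK_SIZE - block_remaining;

        /* Apply offset at the use site only; never fold it into 'to'. */
        memcpy(to + offset, from + sg->length - sg_remaining, copy_bytes);

        sg_remaining -= copy_bytes;
        block_remaining -= copy_bytes;
}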
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 308b6e17c88a..fe2f00ceafc5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
int res;
enum tb_port_type type;
+ /*
+ * Some DROMs list more ports than the controller actually has
+ * so we skip those but allow the parser to continue.
+ */
+ if (header->index > sw->config.max_port_number) {
+ dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+ return 0;
+ }
+
port = &sw->ports[header->index];
port->disabled = header->port_disabled;
if (port->disabled)
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8ee340290219..bdaac1ff00a5 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -904,7 +904,14 @@ static int icm_driver_ready(struct tb *tb)
static int icm_suspend(struct tb *tb)
{
- return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+ int ret;
+
+ ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+ if (ret)
+ tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
+ ret, __func__);
+
+ return 0;
}
/*
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 40219a706309..e9391bbd4036 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
struct nvm_auth_status {
struct list_head list;
- uuid_be uuid;
+ uuid_t uuid;
u32 status;
};
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
struct nvm_auth_status *st;
list_for_each_entry(st, &nvm_auth_status_cache, list) {
- if (!uuid_be_cmp(st->uuid, *sw->uuid))
+ if (uuid_equal(&st->uuid, sw->uuid))
return st;
}
@@ -1461,7 +1461,7 @@ struct tb_sw_lookup {
struct tb *tb;
u8 link;
u8 depth;
- const uuid_be *uuid;
+ const uuid_t *uuid;
};
static int tb_switch_match(struct device *dev, void *data)
@@ -1518,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
*/
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
struct tb_sw_lookup lookup;
struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e58..e0deee4f1eb0 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -101,7 +101,7 @@ struct tb_switch {
struct tb_dma_port *dma_port;
struct tb *tb;
u64 uid;
- uuid_be *uuid;
+ uuid_t *uuid;
u16 vendor;
u16 device;
const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c0919..de6441e4a060 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
struct icm_fr_event_device_connected {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
struct icm_fr_pkg_approve_device {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
struct icm_fr_pkg_add_device_key {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
struct icm_fr_pkg_add_device_key_response {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
struct icm_fr_pkg_challenge_device {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
struct icm_fr_pkg_challenge_device_response {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b5def356af63..1aab3010fbfa 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->dl_write)
uart->dl_write = up->dl_write;
- if (serial8250_isa_config != NULL)
- serial8250_isa_config(0, &uart->port,
- &uart->capabilities);
+ if (uart->port.type != PORT_8250_CIR) {
+ if (serial8250_isa_config != NULL)
+ serial8250_isa_config(0, &uart->port,
+ &uart->capabilities);
+
+ ret = uart_add_one_port(&serial8250_reg,
+ &uart->port);
+ if (ret == 0)
+ ret = uart->port.line;
+ } else {
+ dev_info(uart->port.dev,
+ "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+ uart->port.iobase,
+ (unsigned long long)uart->port.mapbase,
+ uart->port.irq);
- ret = uart_add_one_port(&serial8250_reg, &uart->port);
- if (ret == 0)
- ret = uart->port.line;
+ ret = 0;
+ }
}
mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index b5c98e5bf524..c6360fbdf808 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -261,7 +261,7 @@ __xr17v35x_register_gpio(struct pci_dev *pcidev,
}
static const struct property_entry exar_gpio_properties[] = {
- PROPERTY_ENTRY_U32("linux,first-pin", 0),
+ PROPERTY_ENTRY_U32("exar,first-pin", 0),
PROPERTY_ENTRY_U32("ngpios", 16),
{ }
};
@@ -326,7 +326,7 @@ static int iot2040_rs485_config(struct uart_port *port,
}
static const struct property_entry iot2040_gpio_properties[] = {
- PROPERTY_ENTRY_U32("linux,first-pin", 10),
+ PROPERTY_ENTRY_U32("exar,first-pin", 10),
PROPERTY_ENTRY_U32("ngpios", 1),
{ }
};
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a857bb34fbb..1888d168a41c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
.fixed_options = true,
};
-/*
- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
- * occasionally getting stuck as 1. To avoid the potential for a hang, check
- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
- * implementations, so only do so if an affected platform is detected in
- * parse_spcr().
- */
-static bool qdf2400_e44_present = false;
-
+#ifdef CONFIG_ACPI_SPCR_TABLE
static struct vendor_data vendor_qdt_qdf2400_e44 = {
.reg_offset = pl011_std_offsets,
.fr_busy = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
.always_enabled = true,
.fixed_options = true,
};
+#endif
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
[REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
resource_size_t addr;
int i;
- if (strcmp(name, "qdf2400_e44") == 0) {
- pr_info_once("UART: Working around QDF2400 SoC erratum 44");
- qdf2400_e44_present = true;
- } else if (strcmp(name, "pl011") != 0) {
+ /*
+ * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
+ * have a distinct console name, so make sure we check for that.
+ * The actual workaround for the erratum is applied in the probe
+ * function.
+ */
+ if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
return -ENODEV;
- }
if (uart_parse_earlycon(options, &iotype, &addr, &options))
return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
}
uap->port.irq = ret;
- uap->reg_offset = vendor_sbsa.reg_offset;
- uap->vendor = qdf2400_e44_present ?
- &vendor_qdt_qdf2400_e44 : &vendor_sbsa;
+#ifdef CONFIG_ACPI_SPCR_TABLE
+ if (qdf2400_e44_present) {
+ dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
+ uap->vendor = &vendor_qdt_qdf2400_e44;
+ } else
+#endif
+ uap->vendor = &vendor_sbsa;
+
+ uap->reg_offset = uap->vendor->reg_offset;
uap->fifosize = 32;
- uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
+ uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
uap->port.ops = &sbsa_uart_pops;
uap->fixed_baud = baudrate;
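What selecting vendor_qdt_qdf2400_e44 changes, concretely, is the flag-register test used to decide whether the transmitter is idle: erratum 44 says the BUSY bit can stick at 1, so the affected parts poll "TX FIFO empty" instead. A hedged sketch of that test (the FR bit positions follow the PL011 register layout; tx_idle() is a hypothetical helper, not the driver's):

#include <stdio.h>

#define UART011_FR_BUSY	(1 << 3)	/* PL011 FR: transmitter busy */
#define UART011_FR_TXFE	(1 << 7)	/* PL011 FR: TX FIFO empty */

static int tx_idle(unsigned int fr, int e44_present)
{
	if (e44_present)		/* BUSY may be stuck at 1 */
		return !!(fr & UART011_FR_TXFE);
	return !(fr & UART011_FR_BUSY);
}

int main(void)
{
	/* a stuck FR value: BUSY wedged at 1 although the FIFO is empty */
	unsigned int fr = UART011_FR_BUSY | UART011_FR_TXFE;

	printf("plain pl011 would spin: %d, e44 workaround exits: %d\n",
	       tx_idle(fr, 0), tx_idle(fr, 1));
	return 0;
}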
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ab1bb3b538ac..7f277b092b5b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
/* No more submits can occur */
spin_lock_irq(&hcd_urb_list_lock);
rescan:
- list_for_each_entry (urb, &ep->urb_list, urb_list) {
+ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
int is_in;
if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
}
if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
hcd = hcd->shared_hcd;
+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+ set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6e6797d145dd..822f8c50e423 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
- int status, i;
+ int status = -ENODEV;
+ int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
done:
hub_port_disable(hub, port1, 1);
- if (hcd->driver->relinquish_port && !hub->hdev->parent)
- hcd->driver->relinquish_port(hcd, port1);
-
+ if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+ if (status != -ENOTCONN && status != -ENODEV)
+ hcd->driver->relinquish_port(hcd, port1);
+ }
}
/* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edfcdc18..574da2b4529c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6b299c7b7656..f064f1549333 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /*
+ * USB Specification 2.0 Section 5.9.2 states that: "If
+ * there is only a single transaction in the microframe,
+ * only a DATA0 data packet PID is used. If there are
+ * two transactions per microframe, DATA1 is used for
+ * the first transaction data packet and DATA0 is used
+ * for the second transaction data packet. If there are
+ * three transactions per microframe, DATA2 is used for
+ * the first transaction data packet, DATA1 is used for
+ * the second, and DATA0 is used for the third."
+ *
+ * IOW, we should satisfy the following cases:
+ *
+ * 1) length <= maxpacket
+ * - DATA0
+ *
+ * 2) maxpacket < length <= (2 * maxpacket)
+ * - DATA1, DATA0
+ *
+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+ * - DATA2, DATA1, DATA0
+ */
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+ unsigned int mult = ep->mult - 1;
+ unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+ if (length <= (2 * maxp))
+ mult--;
+
+ if (length <= maxp)
+ mult--;
+
+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);
}
} else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
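The added mult logic simply counts how many max-packet transactions the request needs in the microframe and programs PCM1 with that count minus one, matching the three DATA2/DATA1/DATA0 cases quoted from the spec. The arithmetic can be checked standalone (maxp and the lengths are example values):

#include <stdio.h>

static unsigned int pcm1(unsigned int length, unsigned int maxp,
			 unsigned int ep_mult)
{
	unsigned int mult = ep_mult - 1;	/* descriptor allows up to 3 */

	if (length <= (2 * maxp))	/* two packets fit: DATA1, DATA0 */
		mult--;
	if (length <= maxp)		/* one packet: DATA0 only */
		mult--;
	return mult;
}

int main(void)
{
	unsigned int maxp = 1024;

	printf("%u %u %u\n",
	       pcm1(512, maxp, 3),	/* 0: single DATA0 packet */
	       pcm1(1500, maxp, 3),	/* 1: DATA1, DATA0 */
	       pcm1(2500, maxp, 3));	/* 2: DATA2, DATA1, DATA0 */
	return 0;
}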
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 62dc9c7798e7..e1de8fe599a3 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
return usb3_req;
}
-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
- struct renesas_usb3_request *usb3_req, int status)
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req,
+ int status)
{
struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
- unsigned long flags;
dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
status);
usb3_req->req.status = status;
- spin_lock_irqsave(&usb3->lock, flags);
usb3_ep->started = false;
list_del_init(&usb3_req->queue);
- spin_unlock_irqrestore(&usb3->lock, flags);
+ spin_unlock(&usb3->lock);
usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+ spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req, int status)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ __usb3_request_done(usb3_ep, usb3_req, status);
+ spin_unlock_irqrestore(&usb3->lock, flags);
}
static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
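The split into __usb3_request_done() exists so the spinlock can be dropped just around usb_gadget_giveback_request(), whose completion callback may call back into the driver and retake the lock. A runnable userspace analogue of that unlock-around-callback shape (pthread mutex standing in for the spinlock; names illustrative):

#include <pthread.h>
#include <stdio.h>

struct req {
	int status;
	void (*done)(struct req *r);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void giveback(struct req *r, int status)
{
	/* caller holds 'lock', mirroring __usb3_request_done() */
	r->status = status;
	pthread_mutex_unlock(&lock);	/* callback may retake the lock */
	r->done(r);
	pthread_mutex_lock(&lock);
}

static void done_cb(struct req *r)
{
	printf("request completed, status %d\n", r->status);
}

int main(void)
{
	struct req r = { .status = -1, .done = done_cb };

	pthread_mutex_lock(&lock);
	giveback(&r, 0);
	pthread_mutex_unlock(&lock);
	return 0;
}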
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c8989c62a262..c8f38649f749 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
AMD_CHIPSET_HUDSON2,
AMD_CHIPSET_BOLTON,
AMD_CHIPSET_YANGTZE,
+ AMD_CHIPSET_TAISHAN,
AMD_CHIPSET_UNKNOWN,
};
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+ }
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
/* Make sure amd chipset type has already been initialized */
usb_amd_find_chipset_info();
- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
- return 0;
-
- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
- return 1;
+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+ /*
+ * Our dear uPD72020{1,2} friend only partially resets when
+ * asked to via the XHCI interface, and may end up doing DMA
+ * at the wrong addresses, as it keeps the top 32bit of some
+ * addresses from its previous programming under obscure
+ * circumstances.
+ * Give it a good whack at probe time. Unfortunately, this
+ * needs to happen before we've had a chance to discover any
+ * quirk, or the system will be in a rather bad state.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ (pdev->device == 0x0014 || pdev->device == 0x0015))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 655994480198..5582cbafecd4 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
#else
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5b0fa553c8bc..8071c8fdd15e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
driver = (struct hc_driver *)id->driver_data;
+ /* For some HW implementations, an XHCI reset is just not enough... */
+ if (usb_xhci_needs_pci_reset(dev)) {
+ dev_info(&dev->dev, "Resetting\n");
+ if (pci_reset_function_locked(dev))
+ dev_warn(&dev->dev, "Reset failed");
+ }
+
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 76decb8011eb..3344ffd5bb13 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
+ mdelay(1);
}
}
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8fb86a5f458e..3d0dd2f97415 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -197,6 +197,7 @@ struct msm_otg {
struct regulator *v3p3;
struct regulator *v1p8;
struct regulator *vddcx;
+ struct regulator_bulk_data supplies[3];
struct reset_control *phy_rst;
struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
static int msm_otg_probe(struct platform_device *pdev)
{
- struct regulator_bulk_data regs[3];
int ret = 0;
struct device_node *np = pdev->dev.of_node;
struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
return motg->irq;
}
- regs[0].supply = "vddcx";
- regs[1].supply = "v3p3";
- regs[2].supply = "v1p8";
+ motg->supplies[0].supply = "vddcx";
+ motg->supplies[1].supply = "v3p3";
+ motg->supplies[2].supply = "v1p8";
- ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+ ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
+ motg->supplies);
if (ret)
return ret;
- motg->vddcx = regs[0].consumer;
- motg->v3p3 = regs[1].consumer;
- motg->v1p8 = regs[2].consumer;
+ motg->vddcx = motg->supplies[0].consumer;
+ motg->v3p3 = motg->supplies[1].consumer;
+ motg->v1p8 = motg->supplies[2].consumer;
clk_set_rate(motg->clk, 60000000);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 93fba9033b00..2c8161bcf5b5 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe;
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&uep->lock, flags);
pipe = usbhsg_uep_to_pipe(uep);
- if (!pipe) {
- ret = -EINVAL;
+ if (!pipe)
goto out;
- }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b331c9f2..02b67abfc2a1 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
/* Low Power Status register (LPSTS) */
#define LPSTS_SUSPM 0x4000
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
#define UGCTRL2_USB0SEL_OTG 0x00000030
+#define UGCTRL2_VBUSSEL 0x00000400
static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+ UGCTRL2_VBUSSEL);
if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f64e914a8985..2d945c9f975c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11105d..fe123153b1a5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c9ebefd8f35f..a585b477415d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..3b5a15d1dc0d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_UC485 0x2021
#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f329e71..cde115359793 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
- "",
+ "INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
+ US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 06615934fed1..0dceb9fa3a06 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
struct Scsi_Host *host = us_to_host(us);
+ struct scsi_cmnd *srb;
for (;;) {
usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* When we are called with no command pending, we're done */
+ srb = us->srb;
if (us->srb == NULL) {
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
/* lock access to the state */
scsi_lock(host);
- /* indicate that the command is done */
- if (us->srb->result != DID_ABORT << 16) {
- usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
- us->srb->result);
- us->srb->scsi_done(us->srb);
- } else {
+ /* was the command aborted? */
+ if (us->srb->result == DID_ABORT << 16) {
SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
+ srb = NULL; /* Don't call srb->scsi_done() */
}
/*
@@ -429,6 +428,13 @@ SkipForAbort:
/* unlock the device pointers */
mutex_unlock(&us->dev_mutex);
+
+ /* now that the locks are released, notify the SCSI core */
+ if (srb) {
+ usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+ srb->result);
+ srb->scsi_done(srb);
+ }
} /* for (;;) */
/* Wait until we are told to stop */
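The control-thread rework follows a common discipline: the command pointer is snapshotted under the host lock, NULLed on the abort path, and ->scsi_done() is invoked only after both the host lock and dev_mutex are released, so the SCSI midlayer is never re-entered while driver locks are held. A userspace analogue of that decide-under-lock, complete-after-unlock shape (all names illustrative):

#include <pthread.h>
#include <stdio.h>

struct cmd {
	int result;
	int aborted;
	void (*done)(struct cmd *c);
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

static void finish_cmd(struct cmd *pending)
{
	struct cmd *c;

	pthread_mutex_lock(&host_lock);
	c = pending;		/* snapshot, like 'srb = us->srb' */
	if (c->aborted)
		c = NULL;	/* aborted commands get no ->done() call */
	pthread_mutex_unlock(&host_lock);

	if (c)			/* locks dropped: safe to call out */
		c->done(c);
}

static void done_cb(struct cmd *c)
{
	printf("cmd done, result=0x%x\n", c->result);
}

int main(void)
{
	struct cmd c = { .result = 0, .aborted = 0, .done = done_cb };

	finish_cmd(&c);
	return 0;
}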
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 063c1ce6fa42..f041b1a6cf66 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -226,7 +226,14 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
if (ret)
return ret;
- vdev->reset_works = (pci_reset_function(pdev) == 0);
+ /* If reset fails because of the device lock, fail this path entirely */
+ ret = pci_try_reset_function(pdev);
+ if (ret == -EAGAIN) {
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ vdev->reset_works = !ret;
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
if (!vdev->pci_saved_state)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 330a57024cbc..5628fe114347 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -839,7 +839,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
- /* Alloc larger of two possible sizes */
+ /* Alloc largest of possible sizes */
if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
return -ENOMEM;
@@ -1243,11 +1243,16 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
vdev->extended_caps = (dword != 0);
}
- /* length based on version */
- if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
+ /* length based on version and type */
+ if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+ return 0xc; /* "All Devices" only, no link */
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
- else
+ } else {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+ return 0x2c; /* No link */
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
+ }
case PCI_CAP_ID_HT:
ret = pci_read_config_byte(pdev, pos + 3, &byte);
if (ret)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e4613a3c362d..9cb3f722dce1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
- vq->last_used_event = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EINVAL;
break;
}
- vq->last_avail_idx = vq->last_used_event = s.num;
+ vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
@@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__u16 old, new;
__virtio16 event;
bool v;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
@@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- /* We're sure if the following conditions are met, there's no
- * need to notify guest:
- * 1) cached used event is ahead of new
- * 2) old to new updating does not cross cached used event. */
- if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
- !vring_need_event(vq->last_used_event, new, old))
- return false;
-
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
-
if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- vq->last_used_event = vhost16_to_cpu(vq, event);
-
- return vring_need_event(vq->last_used_event, new, old);
+ return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
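With the cached last_used_event state gone, the notify decision reduces to the standard vring_need_event() test against the guest-published event index, evaluated only after the hoisted smp_mb() guarantees the used-index update is visible. The helper (as defined in include/uapi/linux/virtio_ring.h) and its wrap-safe comparison can be checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* notify iff moving from 'old' to 'new_idx' crossed 'event_idx' */
static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* guest asked to be woken once entry 5 has been used */
	printf("%d\n", vring_need_event(5, 6, 4));	/* crossed: 1 */
	printf("%d\n", vring_need_event(5, 5, 4));	/* not yet: 0 */
	/* the u16 arithmetic also survives index wrap-around */
	printf("%d\n", vring_need_event(65535, 1, 65534));	/* 1 */
	return 0;
}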
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f72095868b93..bb7c29b8b9fc 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -115,9 +115,6 @@ struct vhost_virtqueue {
/* Last index we used. */
u16 last_used_idx;
- /* Last used evet we've seen */
- u16 last_used_event;
-
/* Used flags */
u16 used_flags;
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ff01bed7112f..1e784adb89b1 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -17,6 +17,7 @@
#include <asm/efi.h>
static bool request_mem_succeeded = false;
+static bool nowc = false;
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "width:", 6))
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ else if (!strcmp(this_opt, "nowc"))
+ nowc = true;
}
}
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (nowc)
+ info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+ else
+ info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c166e0725be5..ba82f97fb42b 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
-
+ fb_dealloc_cmap(&info->cmap);
pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
-
- fb_dealloc_cmap(&info->cmap);
- kfree(info->pseudo_palette);
- framebuffer_release(info);
-
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
fbi->map_dma);
-
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
+ kfree(info->pseudo_palette);
+ framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index eecf695c16f4..09e5bb013d28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
static int __init omap_dss_probe(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int r;
core.pdev = pdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 156a254705ea..ec78d61bc551 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -664,7 +664,7 @@ static int hdmi_audio_register(struct device *dev)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = dev,
- .dss_version = omapdss_get_version(),
+ .version = 4,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
.ops = &hdmi_audio_ops,
};
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index 4da36bcab977..2e2fcc3d6d4f 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -695,7 +695,7 @@ static int hdmi_audio_register(struct device *dev)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = dev,
- .dss_version = omapdss_get_version(),
+ .version = 5,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
.ops = &hdmi_audio_ops,
};
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 22caf808bfab..f0b3a0b9d42f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page)
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
-static struct page *balloon_pfn_to_page(u32 pfn)
-{
- BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
- return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
-}
-
static void balloon_ack(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
@@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
{
unsigned int i;
- /* Set balloon pfns pointing at this page.
- * Note that the first pfn points at start of the page. */
+ /*
+ * Set balloon pfns pointing at this page.
+ * Note that the first pfn points at start of the page.
+ */
for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
pfns[i] = cpu_to_virtio32(vb->vdev,
page_to_balloon_pfn(page) + i);
@@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
return num_allocated_pages;
}
-static void release_pages_balloon(struct virtio_balloon *vb)
+static void release_pages_balloon(struct virtio_balloon *vb,
+ struct list_head *pages)
{
- unsigned int i;
- struct page *page;
+ struct page *page, *next;
- /* Find pfns pointing at start of each page, get pages and free them. */
- for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
- vb->pfns[i]));
+ list_for_each_entry_safe(page, next, pages, lru) {
if (!virtio_has_feature(vb->vdev,
VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
adjust_managed_page_count(page, 1);
+ list_del(&page->lru);
put_page(page); /* balloon reference */
}
}
@@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
unsigned num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+ LIST_HEAD(pages);
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
if (!page)
break;
set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ list_add(&page->lru, &pages);
vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
@@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
*/
if (vb->num_pfns != 0)
tell_host(vb, vb->deflate_vq);
- release_pages_balloon(vb);
+ release_pages_balloon(vb, &pages);
mutex_unlock(&vb->balloon_lock);
return num_freed_pages;
}
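Collecting the pages on a local list and then walking it with list_for_each_entry_safe() is what makes deleting each node inside the loop legal: the "_safe" variant caches the next pointer before the body runs, so freeing the current entry cannot derail the iteration. The same cache-next-then-free shape in plain C (illustrative types):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static void free_all(struct node **head)
{
	struct node *n = *head, *next;

	while (n) {
		next = n->next;	/* cache before freeing, like _safe */
		free(n);
		n = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}
	free_all(&head);
	printf("list freed safely\n");
	return 0;
}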
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b241bfa529ce..2d43118077e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
info->cpu = cpu;
}
-static void xen_evtchn_mask_all(void)
-{
- unsigned int evtchn;
-
- for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
- mask_evtchn(evtchn);
-}
-
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
* @irq: irq of event channel to send event to
@@ -582,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)
static void enable_pirq(struct irq_data *data)
{
- startup_pirq(data);
+ enable_dynirq(data);
}
static void disable_pirq(struct irq_data *data)
@@ -1573,7 +1565,6 @@ void xen_irq_resume(void)
struct irq_info *info;
/* New event-channel space is not 'live' yet. */
- xen_evtchn_mask_all();
xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
@@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0);
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
+ unsigned int evtchn;
if (fifo_events)
ret = xen_evtchn_fifo_init();
@@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void)
BUG_ON(!evtchn_to_irq);
/* No event channels are 'live' right now. */
- xen_evtchn_mask_all();
+ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+ mask_evtchn(evtchn);
pirq_needs_eoi = pirq_needs_eoi_flag;
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 66620713242a..a67e955cacd1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter;
static void frontswap_selfshrink(void)
{
static unsigned long cur_frontswap_pages;
- static unsigned long last_frontswap_pages;
- static unsigned long tgt_frontswap_pages;
+ unsigned long last_frontswap_pages;
+ unsigned long tgt_frontswap_pages;
last_frontswap_pages = cur_frontswap_pages;
cur_frontswap_pages = frontswap_curr_pages();
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e46080214955..3e59590c7254 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
struct list_head *ent;
struct xs_watch_event *event;
+ xenwatch_pid = current->pid;
+
for (;;) {
wait_event_interruptible(watch_events_waitq,
!list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
task = kthread_run(xenwatch_thread, NULL, "xenwatch");
if (IS_ERR(task))
return PTR_ERR(task);
- xenwatch_pid = task->pid;
/* shutdown watches for kexec boot */
xs_reset_watches();
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 967f069385d0..71ddfb4cf61c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -87,7 +87,6 @@ static int __init xenfs_init(void)
if (xen_domain())
return register_filesystem(&xenfs_type);
- pr_info("not registering filesystem on non-xen platform\n");
return 0;
}